Diffstat (limited to 'deps/rabbit')
-rw-r--r--  deps/rabbit/.gitignore | 42
-rw-r--r--  deps/rabbit/.travis.yml | 61
-rw-r--r--  deps/rabbit/.travis.yml.patch | 11
-rw-r--r--  deps/rabbit/CODE_OF_CONDUCT.md | 44
-rw-r--r--  deps/rabbit/CONTRIBUTING.md | 123
-rw-r--r--  deps/rabbit/INSTALL | 2
-rw-r--r--  deps/rabbit/LICENSE | 5
-rw-r--r--  deps/rabbit/LICENSE-MPL-RabbitMQ | 373
-rw-r--r--  deps/rabbit/Makefile | 303
-rw-r--r--  deps/rabbit/README.md | 65
-rw-r--r--  deps/rabbit/SECURITY.md | 24
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/.gitignore | 12
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/Makefile | 11
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl | 76
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl | 38
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl | 174
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl | 228
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl | 11
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl | 520
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl | 104
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl | 115
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl | 47
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl | 114
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl | 93
-rw-r--r--  deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl | 22
-rw-r--r--  deps/rabbit/docs/README-for-packages | 30
-rw-r--r--  deps/rabbit/docs/README.md | 35
-rw-r--r--  deps/rabbit/docs/advanced.config.example | 109
-rw-r--r--  deps/rabbit/docs/rabbitmq-diagnostics.8 | 725
-rw-r--r--  deps/rabbit/docs/rabbitmq-echopid.8 | 70
-rw-r--r--  deps/rabbit/docs/rabbitmq-env.conf.5 | 86
-rw-r--r--  deps/rabbit/docs/rabbitmq-plugins.8 | 254
-rw-r--r--  deps/rabbit/docs/rabbitmq-queues.8 | 202
-rw-r--r--  deps/rabbit/docs/rabbitmq-server.8 | 98
-rw-r--r--  deps/rabbit/docs/rabbitmq-server.service.example | 27
-rw-r--r--  deps/rabbit/docs/rabbitmq-service.8 | 152
-rw-r--r--  deps/rabbit/docs/rabbitmq-upgrade.8 | 108
-rw-r--r--  deps/rabbit/docs/rabbitmq.conf.example | 1002
-rw-r--r--  deps/rabbit/docs/rabbitmqctl.8 | 2424
-rw-r--r--  deps/rabbit/docs/set_rabbitmq_policy.sh.example | 4
-rw-r--r--  deps/rabbit/erlang.mk | 7808
-rw-r--r--  deps/rabbit/include/amqqueue.hrl | 132
-rw-r--r--  deps/rabbit/include/amqqueue_v1.hrl | 20
-rw-r--r--  deps/rabbit/include/amqqueue_v2.hrl | 22
-rw-r--r--  deps/rabbit/include/gm_specs.hrl | 15
-rw-r--r--  deps/rabbit/include/vhost.hrl | 6
-rw-r--r--  deps/rabbit/include/vhost_v1.hrl | 4
-rw-r--r--  deps/rabbit/include/vhost_v2.hrl | 5
-rw-r--r--  deps/rabbit/priv/schema/.gitignore | 4
-rw-r--r--  deps/rabbit/priv/schema/rabbit.schema | 1791
-rw-r--r--  deps/rabbit/rabbitmq-components.mk | 359
-rwxr-xr-x  deps/rabbit/scripts/rabbitmq-defaults | 18
-rw-r--r--  deps/rabbit/scripts/rabbitmq-defaults.bat | 21
-rwxr-xr-x  deps/rabbit/scripts/rabbitmq-diagnostics | 23
-rw-r--r--  deps/rabbit/scripts/rabbitmq-diagnostics.bat | 55
-rw-r--r--  deps/rabbit/scripts/rabbitmq-echopid.bat | 57
-rwxr-xr-x  deps/rabbit/scripts/rabbitmq-env | 190
-rw-r--r--  deps/rabbit/scripts/rabbitmq-env.bat | 173
-rwxr-xr-x  deps/rabbit/scripts/rabbitmq-plugins | 23
-rw-r--r--  deps/rabbit/scripts/rabbitmq-plugins.bat | 56
-rwxr-xr-x  deps/rabbit/scripts/rabbitmq-queues | 23
-rw-r--r--  deps/rabbit/scripts/rabbitmq-queues.bat | 56
-rwxr-xr-x  deps/rabbit/scripts/rabbitmq-rel | 58
-rwxr-xr-x  deps/rabbit/scripts/rabbitmq-server | 155
-rw-r--r--  deps/rabbit/scripts/rabbitmq-server.bat | 91
-rw-r--r--  deps/rabbit/scripts/rabbitmq-service.bat | 271
-rwxr-xr-x  deps/rabbit/scripts/rabbitmq-streams | 32
-rw-r--r--  deps/rabbit/scripts/rabbitmq-streams.bat | 63
-rwxr-xr-x  deps/rabbit/scripts/rabbitmq-upgrade | 23
-rw-r--r--  deps/rabbit/scripts/rabbitmq-upgrade.bat | 55
-rwxr-xr-x  deps/rabbit/scripts/rabbitmqctl | 23
-rw-r--r--  deps/rabbit/scripts/rabbitmqctl.bat | 56
-rw-r--r--  deps/rabbit/src/amqqueue.erl | 762
-rw-r--r--  deps/rabbit/src/amqqueue_v1.erl | 584
-rw-r--r--  deps/rabbit/src/background_gc.erl | 78
-rw-r--r--  deps/rabbit/src/code_server_cache.erl | 81
-rw-r--r--  deps/rabbit/src/gatherer.erl | 151
-rw-r--r--  deps/rabbit/src/gm.erl | 1650
-rw-r--r--  deps/rabbit/src/internal_user.erl | 216
-rw-r--r--  deps/rabbit/src/internal_user_v1.erl | 151
-rw-r--r--  deps/rabbit/src/lager_exchange_backend.erl | 233
-rw-r--r--  deps/rabbit/src/lqueue.erl | 102
-rw-r--r--  deps/rabbit/src/mirrored_supervisor_sups.erl | 34
-rw-r--r--  deps/rabbit/src/pg_local.erl | 249
-rw-r--r--  deps/rabbit/src/rabbit.erl | 1511
-rw-r--r--  deps/rabbit/src/rabbit_access_control.erl | 257
-rw-r--r--  deps/rabbit/src/rabbit_alarm.erl | 365
-rw-r--r--  deps/rabbit/src/rabbit_amqqueue.erl | 1889
-rw-r--r--  deps/rabbit/src/rabbit_amqqueue_process.erl | 1849
-rw-r--r--  deps/rabbit/src/rabbit_amqqueue_sup.erl | 35
-rw-r--r--  deps/rabbit/src/rabbit_amqqueue_sup_sup.erl | 84
-rw-r--r--  deps/rabbit/src/rabbit_auth_backend_internal.erl | 1076
-rw-r--r--  deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl | 54
-rw-r--r--  deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl | 48
-rw-r--r--  deps/rabbit/src/rabbit_auth_mechanism_plain.erl | 60
-rw-r--r--  deps/rabbit/src/rabbit_autoheal.erl | 456
-rw-r--r--  deps/rabbit/src/rabbit_backing_queue.erl | 264
-rw-r--r--  deps/rabbit/src/rabbit_basic.erl | 354
-rw-r--r--  deps/rabbit/src/rabbit_binding.erl | 691
-rw-r--r--  deps/rabbit/src/rabbit_boot_steps.erl | 91
-rw-r--r--  deps/rabbit/src/rabbit_channel.erl | 2797
-rw-r--r--  deps/rabbit/src/rabbit_channel_interceptor.erl | 104
-rw-r--r--  deps/rabbit/src/rabbit_channel_sup.erl | 92
-rw-r--r--  deps/rabbit/src/rabbit_channel_sup_sup.erl | 42
-rw-r--r--  deps/rabbit/src/rabbit_channel_tracking.erl | 291
-rw-r--r--  deps/rabbit/src/rabbit_channel_tracking_handler.erl | 71
-rw-r--r--  deps/rabbit/src/rabbit_classic_queue.erl | 527
-rw-r--r--  deps/rabbit/src/rabbit_client_sup.erl | 43
-rw-r--r--  deps/rabbit/src/rabbit_config.erl | 46
-rw-r--r--  deps/rabbit/src/rabbit_confirms.erl | 152
-rw-r--r--  deps/rabbit/src/rabbit_connection_helper_sup.erl | 57
-rw-r--r--  deps/rabbit/src/rabbit_connection_sup.erl | 66
-rw-r--r--  deps/rabbit/src/rabbit_connection_tracking.erl | 515
-rw-r--r--  deps/rabbit/src/rabbit_connection_tracking_handler.erl | 80
-rw-r--r--  deps/rabbit/src/rabbit_control_pbe.erl | 82
-rw-r--r--  deps/rabbit/src/rabbit_core_ff.erl | 179
-rw-r--r--  deps/rabbit/src/rabbit_core_metrics_gc.erl | 199
-rw-r--r--  deps/rabbit/src/rabbit_credential_validation.erl | 44
-rw-r--r--  deps/rabbit/src/rabbit_credential_validator.erl | 19
-rw-r--r--  deps/rabbit/src/rabbit_credential_validator_accept_everything.erl | 23
-rw-r--r--  deps/rabbit/src/rabbit_credential_validator_min_password_length.erl | 50
-rw-r--r--  deps/rabbit/src/rabbit_credential_validator_password_regexp.erl | 42
-rw-r--r--  deps/rabbit/src/rabbit_dead_letter.erl | 253
-rw-r--r--  deps/rabbit/src/rabbit_definitions.erl | 767
-rw-r--r--  deps/rabbit/src/rabbit_diagnostics.erl | 119
-rw-r--r--  deps/rabbit/src/rabbit_direct.erl | 235
-rw-r--r--  deps/rabbit/src/rabbit_disk_monitor.erl | 317
-rw-r--r--  deps/rabbit/src/rabbit_epmd_monitor.erl | 104
-rw-r--r--  deps/rabbit/src/rabbit_event_consumer.erl | 197
-rw-r--r--  deps/rabbit/src/rabbit_exchange.erl | 592
-rw-r--r--  deps/rabbit/src/rabbit_exchange_decorator.erl | 105
-rw-r--r--  deps/rabbit/src/rabbit_exchange_parameters.erl | 39
-rw-r--r--  deps/rabbit/src/rabbit_exchange_type_direct.erl | 46
-rw-r--r--  deps/rabbit/src/rabbit_exchange_type_fanout.erl | 45
-rw-r--r--  deps/rabbit/src/rabbit_exchange_type_headers.erl | 136
-rw-r--r--  deps/rabbit/src/rabbit_exchange_type_invalid.erl | 45
-rw-r--r--  deps/rabbit/src/rabbit_exchange_type_topic.erl | 266
-rw-r--r--  deps/rabbit/src/rabbit_feature_flags.erl | 2470
-rw-r--r--  deps/rabbit/src/rabbit_ff_extra.erl | 244
-rw-r--r--  deps/rabbit/src/rabbit_ff_registry.erl | 189
-rw-r--r--  deps/rabbit/src/rabbit_fhc_helpers.erl | 45
-rw-r--r--  deps/rabbit/src/rabbit_fifo.erl | 2124
-rw-r--r--  deps/rabbit/src/rabbit_fifo.hrl | 210
-rw-r--r--  deps/rabbit/src/rabbit_fifo_client.erl | 888
-rw-r--r--  deps/rabbit/src/rabbit_fifo_index.erl | 119
-rw-r--r--  deps/rabbit/src/rabbit_fifo_v0.erl | 1961
-rw-r--r--  deps/rabbit/src/rabbit_fifo_v0.hrl | 195
-rw-r--r--  deps/rabbit/src/rabbit_file.erl | 321
-rw-r--r--  deps/rabbit/src/rabbit_framing.erl | 36
-rw-r--r--  deps/rabbit/src/rabbit_guid.erl | 181
-rw-r--r--  deps/rabbit/src/rabbit_health_check.erl | 80
-rw-r--r--  deps/rabbit/src/rabbit_lager.erl | 723
-rw-r--r--  deps/rabbit/src/rabbit_limiter.erl | 448
-rw-r--r--  deps/rabbit/src/rabbit_log_tail.erl | 102
-rw-r--r--  deps/rabbit/src/rabbit_looking_glass.erl | 48
-rw-r--r--  deps/rabbit/src/rabbit_maintenance.erl | 354
-rw-r--r--  deps/rabbit/src/rabbit_memory_monitor.erl | 259
-rw-r--r--  deps/rabbit/src/rabbit_metrics.erl | 45
-rw-r--r--  deps/rabbit/src/rabbit_mirror_queue_coordinator.erl | 460
-rw-r--r--  deps/rabbit/src/rabbit_mirror_queue_master.erl | 578
-rw-r--r--  deps/rabbit/src/rabbit_mirror_queue_misc.erl | 680
-rw-r--r--  deps/rabbit/src/rabbit_mirror_queue_mode.erl | 42
-rw-r--r--  deps/rabbit/src/rabbit_mirror_queue_mode_all.erl | 32
-rw-r--r--  deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl | 45
-rw-r--r--  deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl | 69
-rw-r--r--  deps/rabbit/src/rabbit_mirror_queue_slave.erl | 1093
-rw-r--r--  deps/rabbit/src/rabbit_mirror_queue_sync.erl | 420
-rw-r--r--  deps/rabbit/src/rabbit_mnesia.erl | 1117
-rw-r--r--  deps/rabbit/src/rabbit_mnesia_rename.erl | 276
-rw-r--r--  deps/rabbit/src/rabbit_msg_file.erl | 114
-rw-r--r--  deps/rabbit/src/rabbit_msg_record.erl | 400
-rw-r--r--  deps/rabbit/src/rabbit_msg_store.erl | 2245
-rw-r--r--  deps/rabbit/src/rabbit_msg_store_ets_index.erl | 76
-rw-r--r--  deps/rabbit/src/rabbit_msg_store_gc.erl | 125
-rw-r--r--  deps/rabbit/src/rabbit_networking.erl | 663
-rw-r--r--  deps/rabbit/src/rabbit_node_monitor.erl | 926
-rw-r--r--  deps/rabbit/src/rabbit_nodes.erl | 157
-rw-r--r--  deps/rabbit/src/rabbit_osiris_metrics.erl | 103
-rw-r--r--  deps/rabbit/src/rabbit_parameter_validation.erl | 88
-rw-r--r--  deps/rabbit/src/rabbit_password.erl | 52
-rw-r--r--  deps/rabbit/src/rabbit_password_hashing_md5.erl | 19
-rw-r--r--  deps/rabbit/src/rabbit_password_hashing_sha256.erl | 15
-rw-r--r--  deps/rabbit/src/rabbit_password_hashing_sha512.erl | 15
-rw-r--r--  deps/rabbit/src/rabbit_peer_discovery.erl | 326
-rw-r--r--  deps/rabbit/src/rabbit_peer_discovery_classic_config.erl | 75
-rw-r--r--  deps/rabbit/src/rabbit_peer_discovery_dns.erl | 113
-rw-r--r--  deps/rabbit/src/rabbit_plugins.erl | 699
-rw-r--r--  deps/rabbit/src/rabbit_policies.erl | 179
-rw-r--r--  deps/rabbit/src/rabbit_policy.erl | 557
-rw-r--r--  deps/rabbit/src/rabbit_policy_merge_strategy.erl | 19
-rw-r--r--  deps/rabbit/src/rabbit_prelaunch_cluster.erl | 22
-rw-r--r--  deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl | 53
-rw-r--r--  deps/rabbit/src/rabbit_prelaunch_feature_flags.erl | 32
-rw-r--r--  deps/rabbit/src/rabbit_prelaunch_logging.erl | 75
-rw-r--r--  deps/rabbit/src/rabbit_prequeue.erl | 100
-rw-r--r--  deps/rabbit/src/rabbit_priority_queue.erl | 688
-rw-r--r--  deps/rabbit/src/rabbit_queue_consumers.erl | 568
-rw-r--r--  deps/rabbit/src/rabbit_queue_decorator.erl | 72
-rw-r--r--  deps/rabbit/src/rabbit_queue_index.erl | 1521
-rw-r--r--  deps/rabbit/src/rabbit_queue_location_client_local.erl | 39
-rw-r--r--  deps/rabbit/src/rabbit_queue_location_min_masters.erl | 70
-rw-r--r--  deps/rabbit/src/rabbit_queue_location_random.erl | 42
-rw-r--r--  deps/rabbit/src/rabbit_queue_location_validator.erl | 67
-rw-r--r--  deps/rabbit/src/rabbit_queue_master_location_misc.erl | 108
-rw-r--r--  deps/rabbit/src/rabbit_queue_master_locator.erl | 19
-rw-r--r--  deps/rabbit/src/rabbit_queue_type.erl | 581
-rw-r--r--  deps/rabbit/src/rabbit_queue_type_util.erl | 74
-rw-r--r--  deps/rabbit/src/rabbit_quorum_memory_manager.erl | 67
-rw-r--r--  deps/rabbit/src/rabbit_quorum_queue.erl | 1523
-rw-r--r--  deps/rabbit/src/rabbit_ra_registry.erl | 25
-rw-r--r--  deps/rabbit/src/rabbit_reader.erl | 1803
-rw-r--r--  deps/rabbit/src/rabbit_recovery_terms.erl | 240
-rw-r--r--  deps/rabbit/src/rabbit_restartable_sup.erl | 33
-rw-r--r--  deps/rabbit/src/rabbit_router.erl | 65
-rw-r--r--  deps/rabbit/src/rabbit_runtime_parameters.erl | 412
-rw-r--r--  deps/rabbit/src/rabbit_ssl.erl | 195
-rw-r--r--  deps/rabbit/src/rabbit_stream_coordinator.erl | 949
-rw-r--r--  deps/rabbit/src/rabbit_stream_queue.erl | 734
-rw-r--r--  deps/rabbit/src/rabbit_sup.erl | 109
-rw-r--r--  deps/rabbit/src/rabbit_sysmon_handler.erl | 235
-rw-r--r--  deps/rabbit/src/rabbit_sysmon_minder.erl | 156
-rw-r--r--  deps/rabbit/src/rabbit_table.erl | 416
-rw-r--r--  deps/rabbit/src/rabbit_trace.erl | 128
-rw-r--r--  deps/rabbit/src/rabbit_tracking.erl | 103
-rw-r--r--  deps/rabbit/src/rabbit_upgrade.erl | 314
-rw-r--r--  deps/rabbit/src/rabbit_upgrade_functions.erl | 662
-rw-r--r--  deps/rabbit/src/rabbit_upgrade_preparation.erl | 51
-rw-r--r--  deps/rabbit/src/rabbit_variable_queue.erl | 3015
-rw-r--r--  deps/rabbit/src/rabbit_version.erl | 227
-rw-r--r--  deps/rabbit/src/rabbit_vhost.erl | 422
-rw-r--r--  deps/rabbit/src/rabbit_vhost_limit.erl | 205
-rw-r--r--  deps/rabbit/src/rabbit_vhost_msg_store.erl | 68
-rw-r--r--  deps/rabbit/src/rabbit_vhost_process.erl | 96
-rw-r--r--  deps/rabbit/src/rabbit_vhost_sup.erl | 22
-rw-r--r--  deps/rabbit/src/rabbit_vhost_sup_sup.erl | 271
-rw-r--r--  deps/rabbit/src/rabbit_vhost_sup_wrapper.erl | 57
-rw-r--r--  deps/rabbit/src/rabbit_vm.erl | 427
-rw-r--r--  deps/rabbit/src/supervised_lifecycle.erl | 53
-rw-r--r--  deps/rabbit/src/tcp_listener.erl | 90
-rw-r--r--  deps/rabbit/src/tcp_listener_sup.erl | 54
-rw-r--r--  deps/rabbit/src/term_to_binary_compat.erl | 15
-rw-r--r--  deps/rabbit/src/vhost.erl | 172
-rw-r--r--  deps/rabbit/src/vhost_v1.erl | 106
-rw-r--r--  deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl | 302
-rw-r--r--  deps/rabbit/test/backing_queue_SUITE.erl | 1632
-rw-r--r--  deps/rabbit/test/channel_interceptor_SUITE.erl | 108
-rw-r--r--  deps/rabbit/test/channel_operation_timeout_SUITE.erl | 198
-rw-r--r--  deps/rabbit/test/channel_operation_timeout_test_queue.erl | 323
-rw-r--r--  deps/rabbit/test/cluster_SUITE.erl | 307
-rw-r--r--  deps/rabbit/test/cluster_rename_SUITE.erl | 301
-rw-r--r--  deps/rabbit/test/clustering_management_SUITE.erl | 861
-rw-r--r--  deps/rabbit/test/config_schema_SUITE.erl | 54
-rw-r--r--  deps/rabbit/test/config_schema_SUITE_data/certs/cacert.pem | 1
-rw-r--r--  deps/rabbit/test/config_schema_SUITE_data/certs/cert.pem | 1
-rw-r--r--  deps/rabbit/test/config_schema_SUITE_data/certs/key.pem | 1
-rw-r--r--  deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets | 797
-rw-r--r--  deps/rabbit/test/confirms_rejects_SUITE.erl | 412
-rw-r--r--  deps/rabbit/test/consumer_timeout_SUITE.erl | 262
-rw-r--r--  deps/rabbit/test/crashing_queues_SUITE.erl | 267
-rw-r--r--  deps/rabbit/test/dead_lettering_SUITE.erl | 1174
-rw-r--r--  deps/rabbit/test/definition_import_SUITE.erl | 257
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case1.json | 99
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case10/case10a.json | 67
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case10/case10b.json | 595
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case11.json | 24
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case13.json | 55
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case2.json | 49
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case3.json | 1
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case4.json | 49
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case5.json | 63
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case6.json | 47
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case7.json | 398
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case8.json | 17
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case9/case9a.json | 1
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/case9/case9b.json | 1
-rw-r--r--  deps/rabbit/test/definition_import_SUITE_data/failing_case12.json | 24
-rw-r--r--  deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl | 111
-rw-r--r--  deps/rabbit/test/dummy_event_receiver.erl | 49
-rw-r--r--  deps/rabbit/test/dummy_interceptor.erl | 26
-rw-r--r--  deps/rabbit/test/dummy_runtime_parameters.erl | 63
-rw-r--r--  deps/rabbit/test/dummy_supervisor2.erl | 32
-rw-r--r--  deps/rabbit/test/dynamic_ha_SUITE.erl | 1034
-rw-r--r--  deps/rabbit/test/dynamic_qq_SUITE.erl | 248
-rw-r--r--  deps/rabbit/test/eager_sync_SUITE.erl | 271
-rw-r--r--  deps/rabbit/test/failing_dummy_interceptor.erl | 27
-rw-r--r--  deps/rabbit/test/feature_flags_SUITE.erl | 1156
-rw-r--r--  deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore | 7
-rw-r--r--  deps/rabbit/test/feature_flags_SUITE_data/my_plugin/Makefile | 15
-rw-r--r--  deps/rabbit/test/feature_flags_SUITE_data/my_plugin/erlang.mk | 1
-rw-r--r--  deps/rabbit/test/feature_flags_SUITE_data/my_plugin/rabbitmq-components.mk | 1
-rw-r--r--  deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl | 10
-rw-r--r--  deps/rabbit/test/honeycomb_cth.erl | 105
-rw-r--r--  deps/rabbit/test/lazy_queue_SUITE.erl | 215
-rw-r--r--  deps/rabbit/test/list_consumers_sanity_check_SUITE.erl | 125
-rw-r--r--  deps/rabbit/test/list_queues_online_and_offline_SUITE.erl | 99
-rw-r--r--  deps/rabbit/test/maintenance_mode_SUITE.erl | 284
-rw-r--r--  deps/rabbit/test/many_node_ha_SUITE.erl | 112
-rw-r--r--  deps/rabbit/test/message_size_limit_SUITE.erl | 145
-rw-r--r--  deps/rabbit/test/metrics_SUITE.erl | 404
-rw-r--r--  deps/rabbit/test/mirrored_supervisor_SUITE.erl | 328
-rw-r--r--  deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl | 57
-rw-r--r--  deps/rabbit/test/msg_store_SUITE.erl | 53
-rw-r--r--  deps/rabbit/test/peer_discovery_classic_config_SUITE.erl | 179
-rw-r--r--  deps/rabbit/test/peer_discovery_dns_SUITE.erl | 104
-rw-r--r--  deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl | 1651
-rw-r--r--  deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl | 182
-rw-r--r--  deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl | 850
-rw-r--r--  deps/rabbit/test/per_user_connection_tracking_SUITE.erl | 269
-rw-r--r--  deps/rabbit/test/per_vhost_connection_limit_SUITE.erl | 751
-rw-r--r--  deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl | 150
-rw-r--r--  deps/rabbit/test/per_vhost_msg_store_SUITE.erl | 245
-rw-r--r--  deps/rabbit/test/per_vhost_queue_limit_SUITE.erl | 682
-rw-r--r--  deps/rabbit/test/policy_SUITE.erl | 204
-rw-r--r--  deps/rabbit/test/priority_queue_SUITE.erl | 771
-rw-r--r--  deps/rabbit/test/priority_queue_recovery_SUITE.erl | 144
-rw-r--r--  deps/rabbit/test/product_info_SUITE.erl | 171
-rw-r--r--  deps/rabbit/test/proxy_protocol_SUITE.erl | 91
-rw-r--r--  deps/rabbit/test/publisher_confirms_parallel_SUITE.erl | 380
-rw-r--r--  deps/rabbit/test/queue_length_limits_SUITE.erl | 382
-rw-r--r--  deps/rabbit/test/queue_master_location_SUITE.erl | 487
-rw-r--r--  deps/rabbit/test/queue_parallel_SUITE.erl | 725
-rw-r--r--  deps/rabbit/test/queue_type_SUITE.erl | 275
-rw-r--r--  deps/rabbit/test/quorum_queue_SUITE.erl | 2792
-rw-r--r--  deps/rabbit/test/quorum_queue_utils.erl | 112
-rw-r--r--  deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl | 46
-rw-r--r--  deps/rabbit/test/rabbit_confirms_SUITE.erl | 154
-rw-r--r--  deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl | 392
-rw-r--r--  deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl | 19
-rw-r--r--  deps/rabbit/test/rabbit_fifo_SUITE.erl | 1667
-rw-r--r--  deps/rabbit/test/rabbit_fifo_int_SUITE.erl | 661
-rw-r--r--  deps/rabbit/test/rabbit_fifo_prop_SUITE.erl | 1211
-rw-r--r--  deps/rabbit/test/rabbit_fifo_v0_SUITE.erl | 1392
-rw-r--r--  deps/rabbit/test/rabbit_foo_protocol_connection_info.erl | 25
-rw-r--r--  deps/rabbit/test/rabbit_ha_test_consumer.erl | 108
-rw-r--r--  deps/rabbit/test/rabbit_ha_test_producer.erl | 131
-rw-r--r--  deps/rabbit/test/rabbit_msg_record_SUITE.erl | 213
-rw-r--r--  deps/rabbit/test/rabbit_stream_queue_SUITE.erl | 1610
-rw-r--r--  deps/rabbit/test/rabbitmq-env.bats | 128
-rw-r--r--  deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl | 139
-rw-r--r--  deps/rabbit/test/rabbitmqctl_integration_SUITE.erl | 164
-rw-r--r--  deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl | 110
-rw-r--r--  deps/rabbit/test/signal_handling_SUITE.erl | 160
-rw-r--r--  deps/rabbit/test/simple_ha_SUITE.erl | 371
-rw-r--r--  deps/rabbit/test/single_active_consumer_SUITE.erl | 376
-rw-r--r--  deps/rabbit/test/sync_detection_SUITE.erl | 243
-rwxr-xr-x  deps/rabbit/test/temp/head_message_timestamp_tests.py | 131
-rwxr-xr-x  deps/rabbit/test/temp/rabbitmqadmin.py | 934
-rw-r--r--  deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl | 105
-rw-r--r--  deps/rabbit/test/test_util.erl | 28
-rw-r--r--  deps/rabbit/test/topic_permission_SUITE.erl | 244
-rw-r--r--  deps/rabbit/test/unit_access_control_SUITE.erl | 445
-rw-r--r--  deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl | 127
-rw-r--r--  deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl | 269
-rw-r--r--  deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl | 231
-rw-r--r--  deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl | 144
-rw-r--r--  deps/rabbit/test/unit_app_management_SUITE.erl | 105
-rw-r--r--  deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl | 71
-rw-r--r--  deps/rabbit/test/unit_collections_SUITE.erl | 51
-rw-r--r--  deps/rabbit/test/unit_config_value_encryption_SUITE.erl | 233
-rw-r--r--  deps/rabbit/test/unit_config_value_encryption_SUITE_data/lib/rabbit_shovel_test/ebin/rabbit_shovel_test.app | 46
-rw-r--r--  deps/rabbit/test/unit_config_value_encryption_SUITE_data/rabbit_shovel_test.passphrase | 1
-rw-r--r--  deps/rabbit/test/unit_connection_tracking_SUITE.erl | 119
-rw-r--r--  deps/rabbit/test/unit_credit_flow_SUITE.erl | 90
-rw-r--r--  deps/rabbit/test/unit_disk_monitor_SUITE.erl | 90
-rw-r--r--  deps/rabbit/test/unit_disk_monitor_mocks_SUITE.erl | 112
-rw-r--r--  deps/rabbit/test/unit_file_handle_cache_SUITE.erl | 278
-rw-r--r--  deps/rabbit/test/unit_gen_server2_SUITE.erl | 152
-rw-r--r--  deps/rabbit/test/unit_gm_SUITE.erl | 242
-rw-r--r--  deps/rabbit/test/unit_log_config_SUITE.erl | 837
-rw-r--r--  deps/rabbit/test/unit_log_management_SUITE.erl | 413
-rw-r--r--  deps/rabbit/test/unit_operator_policy_SUITE.erl | 107
-rw-r--r--  deps/rabbit/test/unit_pg_local_SUITE.erl | 103
-rw-r--r--  deps/rabbit/test/unit_plugin_directories_SUITE.erl | 76
-rw-r--r--  deps/rabbit/test/unit_plugin_versioning_SUITE.erl | 170
-rw-r--r--  deps/rabbit/test/unit_policy_validators_SUITE.erl | 207
-rw-r--r--  deps/rabbit/test/unit_priority_queue_SUITE.erl | 184
-rw-r--r--  deps/rabbit/test/unit_queue_consumers_SUITE.erl | 121
-rw-r--r--  deps/rabbit/test/unit_stats_and_metrics_SUITE.erl | 266
-rw-r--r--  deps/rabbit/test/unit_supervisor2_SUITE.erl | 69
-rw-r--r--  deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl | 95
-rw-r--r--  deps/rabbit/test/upgrade_preparation_SUITE.erl | 109
-rw-r--r--  deps/rabbit/test/vhost_SUITE.erl | 381
382 files changed, 124294 insertions(+), 0 deletions(-)
diff --git a/deps/rabbit/.gitignore b/deps/rabbit/.gitignore
new file mode 100644
index 0000000000..dc870136e8
--- /dev/null
+++ b/deps/rabbit/.gitignore
@@ -0,0 +1,42 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+MnesiaCore.*
+/.erlang.mk/
+/cover/
+/debug/
+/deps/
+/debug/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/etc/
+/logs/
+/plugins/
+/plugins.lock
+/test/ct.cover.spec
+/test/config_schema_SUITE_data/schema/**
+/xrefr
+/sbin/
+/sbin.lock
+rabbit.d
+
+# Generated documentation.
+docs/*.html
+
+# Dialyzer
+*.plt
+
+# Tracing tools
+*-ttb
+*.ti
+*.lz4*
+callgrind.out*
+callgraph.dot*
+
+PACKAGES/*
+
+rabbit-rabbitmq-deps.mk
diff --git a/deps/rabbit/.travis.yml b/deps/rabbit/.travis.yml
new file mode 100644
index 0000000000..a502fe1922
--- /dev/null
+++ b/deps/rabbit/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+  email:
+    recipients:
+      - alerts@rabbitmq.com
+    on_success: never
+    on_failure: always
+addons:
+  apt:
+    packages:
+      - awscli
+cache:
+  apt: true
+env:
+  global:
+    - secure: E4FIneR14YxnWbNNgFY48+Z8YpnwWcvIR0nD1Mo06WKXaq05UfQNQ7SZCjI3kKCNJGMhac12DFRhln+mQZ+T92MQ7IeU3ugpV5RSm+JqIwwIKzVM3+bjCQnFoL24OD4E+GjhJQWYQmPyM7l4OPluMr2N8BtANItgzX3AvljvlSc=
+    - secure: L1t0CHGR4RzOXwtkpM6feRKax95rszScBLqzjstEiMPkhjTsYTlAecnNxx6lTrGMnk5hQoi4PtbhmyZOX0siHTngTogoA/Nyn8etYzicU5ZO+qmBQOYpegz51lEu70ewXgkhEHzk9DtEPxfYviH9WiILrdUVRXXgZpoXq13p1QA=
+
+    # $base_rmq_ref is used by rabbitmq-components.mk to select the
+    # appropriate branch for dependencies.
+    - base_rmq_ref=master
+
+elixir:
+  - '1.10'
+otp_release:
+  - '22.3'
+  - '23.0'
+
+install:
+  # This project being an Erlang one (we just set language to Elixir
+  # to ensure it is installed), we don't want Travis to run mix(1)
+  # automatically as it will break.
+  skip
+
+script:
+  # $current_rmq_ref is also used by rabbitmq-components.mk to select
+  # the appropriate branch for dependencies.
+  - make check-rabbitmq-components.mk
+        current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+  - make xref
+        current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+  - make ct-fast
+        current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+  - |
+    cd "$TRAVIS_BUILD_DIR"
+    if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+      archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+      tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+        xz > "${archive_name}.tar.xz"
+
+      aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+        --region eu-west-1 \
+        --acl public-read
+    fi
diff --git a/deps/rabbit/.travis.yml.patch b/deps/rabbit/.travis.yml.patch
new file mode 100644
index 0000000000..ca4041f5c0
--- /dev/null
+++ b/deps/rabbit/.travis.yml.patch
@@ -0,0 +1,11 @@
+--- ../rabbit_common/.travis.yml 2020-03-04 13:38:36.985065000 +0100
++++ .travis.yml 2020-03-04 14:27:50.983504000 +0100
+@@ -43,7 +43,7 @@
+         current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+   - make xref
+         current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+-  - make tests
++  - make ct-fast
+         current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+ after_failure:
diff --git a/deps/rabbit/CODE_OF_CONDUCT.md b/deps/rabbit/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbit/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbit/CONTRIBUTING.md b/deps/rabbit/CONTRIBUTING.md
new file mode 100644
index 0000000000..42af1f7517
--- /dev/null
+++ b/deps/rabbit/CONTRIBUTING.md
@@ -0,0 +1,123 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, draw a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+To run a "fast suite" (a subset of tests):
+
+ make ct-fast
+
+To run a "slow suite" (a subset of tests that take much longer to run):
+
+ make ct-slow
+
+To run a particular suite:
+
+ make ct-$suite_name
+
+for example, to run the `backing_queue` suite:
+
+ make ct-backing_queue
+
+Finally,
+
+ make tests
+
+will run all suites.
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbit/INSTALL b/deps/rabbit/INSTALL
new file mode 100644
index 0000000000..d105eb5498
--- /dev/null
+++ b/deps/rabbit/INSTALL
@@ -0,0 +1,2 @@
+Please see https://www.rabbitmq.com/download.html for installation
+guides.
diff --git a/deps/rabbit/LICENSE b/deps/rabbit/LICENSE
new file mode 100644
index 0000000000..626a19fef0
--- /dev/null
+++ b/deps/rabbit/LICENSE
@@ -0,0 +1,5 @@
+This package, the RabbitMQ server, is licensed under the MPL 2.0. For
+the MPL, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbit/LICENSE-MPL-RabbitMQ b/deps/rabbit/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbit/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbit/Makefile b/deps/rabbit/Makefile
new file mode 100644
index 0000000000..7d2fae2ea5
--- /dev/null
+++ b/deps/rabbit/Makefile
@@ -0,0 +1,303 @@
+PROJECT = rabbit
+PROJECT_DESCRIPTION = RabbitMQ
+PROJECT_MOD = rabbit
+PROJECT_REGISTERED = rabbit_amqqueue_sup \
+ rabbit_direct_client_sup \
+ rabbit_log \
+ rabbit_node_monitor \
+ rabbit_router
+
+define PROJECT_ENV
+[
+ {tcp_listeners, [5672]},
+ {num_tcp_acceptors, 10},
+ {ssl_listeners, []},
+ {num_ssl_acceptors, 10},
+ {ssl_options, []},
+ {vm_memory_high_watermark, 0.4},
+ {vm_memory_high_watermark_paging_ratio, 0.5},
+ {vm_memory_calculation_strategy, rss},
+ {memory_monitor_interval, 2500},
+ {disk_free_limit, 50000000}, %% 50MB
+ {msg_store_index_module, rabbit_msg_store_ets_index},
+ {backing_queue_module, rabbit_variable_queue},
+ %% 0 ("no limit") would make a better default, but that
+ %% breaks the QPid Java client
+ {frame_max, 131072},
+ %% see rabbitmq-server#1593
+ {channel_max, 2047},
+ {connection_max, infinity},
+ {heartbeat, 60},
+ {msg_store_file_size_limit, 16777216},
+ {msg_store_shutdown_timeout, 600000},
+ {fhc_write_buffering, true},
+ {fhc_read_buffering, false},
+ {queue_index_max_journal_entries, 32768},
+ {queue_index_embed_msgs_below, 4096},
+ {default_user, <<"guest">>},
+ {default_pass, <<"guest">>},
+ {default_user_tags, [administrator]},
+ {default_vhost, <<"/">>},
+ {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
+ {loopback_users, [<<"guest">>]},
+ {password_hashing_module, rabbit_password_hashing_sha256},
+ {server_properties, []},
+ {collect_statistics, none},
+ {collect_statistics_interval, 5000},
+ {mnesia_table_loading_retry_timeout, 30000},
+ {mnesia_table_loading_retry_limit, 10},
+ {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
+ {auth_backends, [rabbit_auth_backend_internal]},
+ {delegate_count, 16},
+ {trace_vhosts, []},
+ {ssl_cert_login_from, distinguished_name},
+ {ssl_handshake_timeout, 5000},
+ {ssl_allow_poodle_attack, false},
+ {handshake_timeout, 10000},
+ {reverse_dns_lookups, false},
+ {cluster_partition_handling, ignore},
+ {cluster_keepalive_interval, 10000},
+ {autoheal_state_transition_timeout, 60000},
+ {tcp_listen_options, [{backlog, 128},
+ {nodelay, true},
+ {linger, {true, 0}},
+ {exit_on_close, false}
+ ]},
+ {halt_on_upgrade_failure, true},
+ {ssl_apps, [asn1, crypto, public_key, ssl]},
+ %% see rabbitmq-server#114
+ {mirroring_flow_control, true},
+ {mirroring_sync_batch_size, 4096},
+ %% see rabbitmq-server#227 and related tickets.
+ %% msg_store_credit_disc_bound only takes effect when
+ %% messages are persisted to the message store. If messages
+ %% are embedded on the queue index, then modifying this
+ %% setting has no effect because credit_flow is not used when
+ %% writing to the queue index. See the setting
+ %% queue_index_embed_msgs_below above.
+ {msg_store_credit_disc_bound, {4000, 800}},
+ {msg_store_io_batch_size, 4096},
+ %% see rabbitmq-server#143,
+ %% rabbitmq-server#949, rabbitmq-server#1098
+ {credit_flow_default_credit, {400, 200}},
+ {quorum_commands_soft_limit, 32},
+ {quorum_cluster_size, 5},
+ %% see rabbitmq-server#248
+ %% and rabbitmq-server#667
+ {channel_operation_timeout, 15000},
+
+ %% see rabbitmq-server#486
+ {autocluster,
+ [{peer_discovery_backend, rabbit_peer_discovery_classic_config}]
+ },
+ %% used by rabbit_peer_discovery_classic_config
+ {cluster_nodes, {[], disc}},
+
+ {config_entry_decoder, [{passphrase, undefined}]},
+
+ %% rabbitmq-server#973
+ {queue_explicit_gc_run_operation_threshold, 1000},
+ {lazy_queue_explicit_gc_run_operation_threshold, 1000},
+ {background_gc_enabled, false},
+ {background_gc_target_interval, 60000},
+ %% rabbitmq-server#589
+ {proxy_protocol, false},
+ {disk_monitor_failure_retries, 10},
+ {disk_monitor_failure_retry_interval, 120000},
+ %% either "stop_node" or "continue".
+ %% by default we choose to not terminate the entire node if one
+ %% vhost had to shut down, see server#1158 and server#1280
+ {vhost_restart_strategy, continue},
+ %% {global, prefetch count}
+ {default_consumer_prefetch, {false, 0}},
+ %% interval at which the channel can perform periodic actions
+ {channel_tick_interval, 60000},
+ %% Default max message size is 128 MB
+ {max_message_size, 134217728},
+ %% Socket writer will run GC every 1 GB of outgoing data
+ {writer_gc_threshold, 1000000000},
+ %% interval at which connection/channel tracking executes post operations
+ {tracking_execution_timeout, 15000},
+ {stream_messages_soft_limit, 256},
+ {track_auth_attempt_source, false}
+ ]
+endef
+
+# With Erlang.mk default behavior, the value of `$(APPS_DIR)` is always
+# relative to the top-level executed Makefile. In our case, it could be
+# a plugin for instance. However, the rabbitmq_prelaunch application is
+# in this repository, not the plugin's. That's why we need to override
+# this value here.
+APPS_DIR := $(CURDIR)/apps
+
+LOCAL_DEPS = sasl rabbitmq_prelaunch os_mon inets compiler public_key crypto ssl syntax_tools xmerl
+BUILD_DEPS = rabbitmq_cli syslog
+DEPS = cuttlefish ranch lager rabbit_common ra sysmon_handler stdout_formatter recon observer_cli osiris amqp10_common
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client meck proper
+
+PLT_APPS += mnesia
+
+dep_cuttlefish = hex 2.4.1
+dep_syslog = git https://github.com/schlagert/syslog 3.4.5
+dep_osiris = git https://github.com/rabbitmq/osiris master
+
+define usage_xml_to_erl
+$(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, src/rabbit_%_usage.erl, $(subst -,_,$(1))))
+endef
+
+DOCS_DIR = docs
+MANPAGES = $(wildcard $(DOCS_DIR)/*.[0-9])
+WEB_MANPAGES = $(patsubst %,%.html,$(MANPAGES))
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \
+ rabbit_common/mk/rabbitmq-dist.mk \
+ rabbit_common/mk/rabbitmq-run.mk \
+ rabbit_common/mk/rabbitmq-test.mk \
+ rabbit_common/mk/rabbitmq-tools.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
+
+# See above why we mess with `$(APPS_DIR)`.
+unexport APPS_DIR
+
+ifeq ($(strip $(BATS)),)
+BATS := $(ERLANG_MK_TMP)/bats/bin/bats
+endif
+
+BATS_GIT ?= https://github.com/sstephenson/bats
+BATS_COMMIT ?= v0.4.0
+
+$(BATS):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 --branch=$(BATS_COMMIT) $(BATS_GIT) $(ERLANG_MK_TMP)/bats
+
+.PHONY: bats
+
+bats: $(BATS)
+ $(verbose) $(BATS) $(TEST_DIR)
+
+tests:: bats
+
+SLOW_CT_SUITES := backing_queue \
+ channel_interceptor \
+ cluster \
+ cluster_rename \
+ clustering_management \
+ config_schema \
+ confirms_rejects \
+ consumer_timeout \
+ crashing_queues \
+ dynamic_ha \
+ dynamic_qq \
+ eager_sync \
+ feature_flags \
+ health_check \
+ lazy_queue \
+ many_node_ha \
+ metrics \
+ msg_store \
+ partitions \
+ per_user_connection_tracking \
+ per_vhost_connection_limit \
+ per_vhost_connection_limit_partitions \
+ per_vhost_msg_store \
+ per_vhost_queue_limit \
+ policy \
+ priority_queue \
+ priority_queue_recovery \
+ publisher_confirms_parallel \
+ queue_master_location \
+ queue_parallel \
+ quorum_queue \
+ rabbit_core_metrics_gc \
+ rabbit_fifo_prop \
+ rabbitmq_queues_cli_integration \
+ rabbitmqctl_integration \
+ simple_ha \
+ sync_detection \
+ unit_inbroker_non_parallel \
+ unit_inbroker_parallel \
+ vhost
+FAST_CT_SUITES := $(filter-out $(sort $(SLOW_CT_SUITES)),$(CT_SUITES))
+
+ct-fast: CT_SUITES = $(FAST_CT_SUITES)
+ct-slow: CT_SUITES = $(SLOW_CT_SUITES)
+
+# --------------------------------------------------------------------
+# Compilation.
+# --------------------------------------------------------------------
+
+RMQ_ERLC_OPTS += -I $(DEPS_DIR)/rabbit_common/include
+
+ifdef INSTRUMENT_FOR_QC
+RMQ_ERLC_OPTS += -DINSTR_MOD=gm_qc
+else
+RMQ_ERLC_OPTS += -DINSTR_MOD=gm
+endif
+
+ifdef CREDIT_FLOW_TRACING
+RMQ_ERLC_OPTS += -DCREDIT_FLOW_TRACING=true
+endif
+
+ifdef DEBUG_FF
+RMQ_ERLC_OPTS += -DDEBUG_QUORUM_QUEUE_FF=true
+endif
+
+ifndef USE_PROPER_QC
+# PropEr needs to be installed for property checking
+# http://proper.softlab.ntua.gr/
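+# The snippet below prints "true" when the `proper` module can be loaded in
+# the current code path and "false" otherwise; -Duse_proper_qc is only added
+# in the former case.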
+USE_PROPER_QC := $(shell $(ERL) -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().')
+RMQ_ERLC_OPTS += $(if $(filter true,$(USE_PROPER_QC)),-Duse_proper_qc)
+endif
+
+# --------------------------------------------------------------------
+# Documentation.
+# --------------------------------------------------------------------
+
+.PHONY: manpages web-manpages distclean-manpages
+
+docs:: manpages web-manpages
+
+manpages: $(MANPAGES)
+ @:
+
+web-manpages: $(WEB_MANPAGES)
+ @:
+
+# We use mandoc(1) to convert manpages to HTML, plus an awk script which:
+# 1. removes the tables at the top and the bottom (they repeat the
+#    manpage name, section and date)
+# 2. "downgrades" headers by one level (e.g. h1 -> h2)
+# 3. annotates .Dl lines with more CSS classes
+%.html: %
+ $(gen_verbose) mandoc -T html -O 'fragment,man=%N.%S.html' "$<" | \
+ awk '\
+ /^<table class="head">$$/ { remove_table=1; next; } \
+ /^<table class="foot">$$/ { remove_table=1; next; } \
+ /^<\/table>$$/ { if (remove_table) { remove_table=0; next; } } \
+ { if (!remove_table) { \
+ line=$$0; \
+ gsub(/<h2/, "<h3", line); \
+ gsub(/<\/h2>/, "</h3>", line); \
+ gsub(/<h1/, "<h2", line); \
+ gsub(/<\/h1>/, "</h2>", line); \
+ gsub(/class="D1"/, "class=\"D1 lang-bash\"", line); \
+ gsub(/class="Bd Bd-indent"/, "class=\"Bd Bd-indent lang-bash\"", line); \
+ gsub(/&#[xX]201[cCdD];/, "\\&quot;", line); \
+ print line; \
+ } } \
+ ' > "$@"
+
+distclean:: distclean-manpages
+
+distclean-manpages::
+ $(gen_verbose) rm -f $(WEB_MANPAGES)
diff --git a/deps/rabbit/README.md b/deps/rabbit/README.md
new file mode 100644
index 0000000000..28bb2699fd
--- /dev/null
+++ b/deps/rabbit/README.md
@@ -0,0 +1,65 @@
+[![OTP v22.3](https://img.shields.io/github/workflow/status/rabbitmq/rabbitmq-server/Test%20-%20Erlang%2022.3/master?label=Erlang%2022.3)](https://github.com/rabbitmq/rabbitmq-server/actions?query=workflow%3A%22Test+-+Erlang+22.3%22+branch%3A%22master%22)
+[![OTP v23](https://img.shields.io/github/workflow/status/rabbitmq/rabbitmq-server/Test%20-%20Erlang%2023.1/master?label=Erlang%2023.1)](https://github.com/rabbitmq/rabbitmq-server/actions?query=workflow%3A%22Test+-+Erlang+23.1%22+branch%3Amaster)
+
+# RabbitMQ Server
+
+[RabbitMQ](https://rabbitmq.com) is a [feature rich](https://rabbitmq.com/documentation.html), multi-protocol messaging broker. It supports:
+
+ * AMQP 0-9-1
+ * AMQP 1.0
+ * MQTT 3.1.1
+ * STOMP 1.0 through 1.2
+
+
+## Installation
+
+ * [Installation guides](https://rabbitmq.com/download.html) for various platforms
+ * [Kubernetes Cluster Operator](https://www.rabbitmq.com/kubernetes/operator/operator-overview.html)
+ * [Changelog](https://www.rabbitmq.com/changelog.html)
+ * [Releases](https://github.com/rabbitmq/rabbitmq-server/releases) on GitHub
+ * [Supported and unsupported series](https://www.rabbitmq.com/versions.html)
+ * [Supported Erlang versions](https://www.rabbitmq.com/which-erlang.html)
+
+
+## Tutorials & Documentation
+
+ * [RabbitMQ tutorials](https://rabbitmq.com/getstarted.html)
+ * [All documentation guides](https://rabbitmq.com/documentation.html)
+ * [CLI tools guide](https://rabbitmq.com/cli.html)
+ * [Configuration guide](https://rabbitmq.com/configure.html)
+ * [Client libraries and tools](https://rabbitmq.com/devtools.html)
+ * [Monitoring guide](https://rabbitmq.com/monitoring.html)
+ * [Production checklist](https://rabbitmq.com/production-checklist.html)
+ * [Runnable tutorials](https://github.com/rabbitmq/rabbitmq-tutorials/)
+ * [Documentation source](https://github.com/rabbitmq/rabbitmq-website/)
+
+
+## Getting Help
+
+ * [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users)
+ * [Commercial support](https://rabbitmq.com/services.html) from [Pivotal](https://pivotal.io) for open source RabbitMQ
+ * [Community Slack](https://rabbitmq-slack.herokuapp.com/)
+ * `#rabbitmq` on Freenode
+
+
+## Contributing
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://rabbitmq.com/github.html).
+
+Questions about contributing, internals and so on are very welcome on the [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Licensing
+
+RabbitMQ server is [licensed under the MPL 2.0](LICENSE-MPL-RabbitMQ).
+
+
+## Building From Source and Packaging
+
+ * [Building RabbitMQ from Source](https://rabbitmq.com/build-server.html)
+ * [Building RabbitMQ Distribution Packages](https://rabbitmq.com/build-server.html)
+
+
+## Copyright
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbit/SECURITY.md b/deps/rabbit/SECURITY.md
new file mode 100644
index 0000000000..762149554f
--- /dev/null
+++ b/deps/rabbit/SECURITY.md
@@ -0,0 +1,24 @@
+# Security Policy
+
+## Supported Versions
+
+See [RabbitMQ Release Series](https://www.rabbitmq.com/versions.html) for a list of currently supported
+versions.
+
+Vulnerabilities reported for versions out of support will not be investigated.
+
+
+## Reporting a Vulnerability
+
+Please responsibly disclose vulnerabilities to `security@rabbitmq.com` and include the following information:
+
+ * RabbitMQ and Erlang versions used
+ * Operating system used
+ * A set of steps to reproduce the observed behavior
+ * An archive produced by [rabbitmq-collect-env](https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env)
+
+The RabbitMQ core team will get back to you after we have triaged the issue. If insufficient reproduction
+information is available, we won't be able to act on the report.
+
+The RabbitMQ core team does not run a security vulnerability bounty programme at this time.
+
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/.gitignore b/deps/rabbit/apps/rabbitmq_prelaunch/.gitignore
new file mode 100644
index 0000000000..adca0d7655
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/.gitignore
@@ -0,0 +1,12 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+/ebin/
+/.erlang.mk/
+/rabbitmq_prelaunch.d
+/xrefr
+
+# Dialyzer
+*.plt
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/Makefile b/deps/rabbit/apps/rabbitmq_prelaunch/Makefile
new file mode 100644
index 0000000000..572f7703d4
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/Makefile
@@ -0,0 +1,11 @@
+PROJECT = rabbitmq_prelaunch
+PROJECT_DESCRIPTION = RabbitMQ prelaunch setup
+PROJECT_VERSION = 1.0.0
+PROJECT_MOD = rabbit_prelaunch_app
+
+DEPS = rabbit_common lager
+
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk
+
+include ../../rabbitmq-components.mk
+include ../../erlang.mk
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl
new file mode 100644
index 0000000000..c76824e7be
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl
@@ -0,0 +1,76 @@
+%%%-------------------------------------------------------------------
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_boot_state).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-export([get/0,
+ set/1,
+ wait_for/2]).
+
+-define(PT_KEY_BOOT_STATE, {?MODULE, boot_state}).
+
+-type boot_state() :: 'stopped' | 'booting' | 'ready' | 'stopping'.
+
+-export_type([boot_state/0]).
+
+-spec get() -> boot_state().
+get() ->
+ persistent_term:get(?PT_KEY_BOOT_STATE, stopped).
+
+-spec set(boot_state()) -> ok.
+set(BootState) ->
+ rabbit_log_prelaunch:debug("Change boot state to `~s`", [BootState]),
+ ?assert(is_valid(BootState)),
+ case BootState of
+ stopped -> persistent_term:erase(?PT_KEY_BOOT_STATE);
+ _ -> persistent_term:put(?PT_KEY_BOOT_STATE, BootState)
+ end,
+ rabbit_boot_state_sup:notify_boot_state_listeners(BootState).
+
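+%% Callers typically poll for a target state; for instance (illustrative),
+%% `rabbit_boot_state:wait_for(ready, 30000)' returns `ok' once the node
+%% reports the `ready' state, or `{error, timeout}' after roughly 30 seconds.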
+-spec wait_for(boot_state(), timeout()) -> ok | {error, timeout}.
+wait_for(BootState, infinity) ->
+ case is_reached(BootState) of
+ true -> ok;
+ false -> Wait = 200,
+ timer:sleep(Wait),
+ wait_for(BootState, infinity)
+ end;
+wait_for(BootState, Timeout)
+ when is_integer(Timeout) andalso Timeout >= 0 ->
+ case is_reached(BootState) of
+ true -> ok;
+ false -> Wait = 200,
+ timer:sleep(Wait),
+ wait_for(BootState, Timeout - Wait)
+ end;
+wait_for(_, _) ->
+ {error, timeout}.
+
+boot_state_idx(stopped) -> 0;
+boot_state_idx(booting) -> 1;
+boot_state_idx(ready) -> 2;
+boot_state_idx(stopping) -> 3.
+
+is_valid(BootState) ->
+ is_integer(boot_state_idx(BootState)).
+
+is_reached(TargetBootState) ->
+ is_reached(?MODULE:get(), TargetBootState).
+
+is_reached(CurrentBootState, CurrentBootState) ->
+ true;
+is_reached(stopping, stopped) ->
+ false;
+is_reached(_CurrentBootState, stopped) ->
+ true;
+is_reached(stopped, _TargetBootState) ->
+ true;
+is_reached(CurrentBootState, TargetBootState) ->
+ boot_state_idx(TargetBootState) =< boot_state_idx(CurrentBootState).
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl
new file mode 100644
index 0000000000..fbdc5781fc
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl
@@ -0,0 +1,38 @@
+%%%-------------------------------------------------------------------
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_boot_state_sup).
+-behaviour(supervisor).
+
+-export([start_link/0,
+ init/1]).
+
+-export([notify_boot_state_listeners/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ SystemdSpec = #{id => rabbit_boot_state_systemd,
+ start => {rabbit_boot_state_systemd, start_link, []},
+ restart => transient},
+ {ok, {#{strategy => one_for_one,
+ intensity => 1,
+ period => 5},
+ [SystemdSpec]}}.
+
+-spec notify_boot_state_listeners(rabbit_boot_state:boot_state()) -> ok.
+notify_boot_state_listeners(BootState) ->
+ lists:foreach(
+ fun
+ ({_, Child, _, _}) when is_pid(Child) ->
+ gen_server:cast(Child, {notify_boot_state, BootState});
+ (_) ->
+ ok
+ end,
+ supervisor:which_children(?MODULE)).
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl
new file mode 100644
index 0000000000..f838535b6a
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl
@@ -0,0 +1,174 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2015-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_boot_state_systemd).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1,
+ handle_call/3,
+ handle_cast/2,
+ terminate/2,
+ code_change/3]).
+
+-record(state, {mechanism,
+ sd_notify_module,
+ socket}).
+
+-define(LOG_PREFIX, "Boot state/systemd: ").
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init([]) ->
+ case os:type() of
+ {unix, _} ->
+ case code:load_file(sd_notify) of
+ {module, sd_notify} ->
+ {ok, #state{mechanism = legacy,
+ sd_notify_module = sd_notify}};
+ {error, _} ->
+ case os:getenv("NOTIFY_SOCKET") of
+ false ->
+ ignore;
+ "" ->
+ ignore;
+ Socket ->
+ {ok, #state{mechanism = socat,
+ socket = Socket}}
+ end
+ end;
+ _ ->
+ ignore
+ end.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast({notify_boot_state, BootState}, State) ->
+ notify_boot_state(BootState, State),
+ {noreply, State}.
+
+terminate(normal, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%% Private
+
+notify_boot_state(ready = BootState,
+ #state{mechanism = legacy, sd_notify_module = SDNotify}) ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "notifying of state `~s` (via native module)",
+ [BootState]),
+ sd_notify_legacy(SDNotify);
+notify_boot_state(ready = BootState,
+ #state{mechanism = socat, socket = Socket}) ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "notifying of state `~s` (via socat(1))",
+ [BootState]),
+ sd_notify_socat(Socket);
+notify_boot_state(BootState, _) ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "ignoring state `~s`",
+ [BootState]),
+ ok.
+
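+%% The message follows the sd_notify(3) protocol: "READY=1" tells systemd
+%% that startup is complete, "STATUS=" sets a free-form status string and
+%% "MAINPID=" points systemd at the PID of the Erlang VM.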
+sd_notify_message() ->
+ "READY=1\nSTATUS=Initialized\nMAINPID=" ++ os:getpid() ++ "\n".
+
+sd_notify_legacy(SDNotify) ->
+ SDNotify:sd_notify(0, sd_notify_message()).
+
+%% socat(1) is the most portable way sd_notify can be implemented in
+%% Erlang without introducing a NIF. Currently the following issues
+%% prevent us from implementing it in a more reasonable way:
+%% - systemd-notify(1) is unstable for non-root users
+%% - Erlang doesn't support Unix domain sockets.
+%%
+%% Some details on how we ended up with this solution:
+%% https://github.com/rabbitmq/rabbitmq-server/issues/664
+sd_notify_socat(Socket) ->
+ case sd_current_unit() of
+ {ok, Unit} ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "systemd unit for activation check: \"~s\"~n",
+ [Unit]),
+ sd_notify_socat(Socket, Unit);
+ _ ->
+ ok
+ end.
+
+sd_notify_socat(Socket, Unit) ->
+ try sd_open_port(Socket) of
+ Port ->
+ Port ! {self(), {command, sd_notify_message()}},
+ Result = sd_wait_activation(Port, Unit),
+ port_close(Port),
+ Result
+ catch
+ Class:Reason ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "Failed to start socat(1): ~p:~p~n",
+ [Class, Reason]),
+ false
+ end.
+
+sd_current_unit() ->
+ CmdOut = os:cmd("ps -o unit= -p " ++ os:getpid()),
+ Ret = (catch re:run(CmdOut,
+ "([-.@0-9a-zA-Z]+)",
+ [unicode, {capture, all_but_first, list}])),
+ case Ret of
+ {'EXIT', _} -> error;
+ {match, [Unit]} -> {ok, Unit};
+ _ -> error
+ end.
+
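+%% A NOTIFY_SOCKET value starting with "@" denotes a Linux abstract Unix
+%% socket, which socat(1) addresses with "abstract-sendto:" instead of
+%% "unix-sendto:".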
+socat_socket_arg("@" ++ AbstractUnixSocket) ->
+ "abstract-sendto:" ++ AbstractUnixSocket;
+socat_socket_arg(UnixSocket) ->
+ "unix-sendto:" ++ UnixSocket.
+
+sd_open_port(Socket) ->
+ open_port(
+ {spawn_executable, os:find_executable("socat")},
+ [{args, [socat_socket_arg(Socket), "STDIO"]},
+ use_stdio, out]).
+
+sd_wait_activation(Port, Unit) ->
+ case os:find_executable("systemctl") of
+ false ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "systemctl(1) unavailable, falling back to sleep~n"),
+ timer:sleep(5000),
+ ok;
+ _ ->
+ sd_wait_activation(Port, Unit, 10)
+ end.
+
+sd_wait_activation(_, _, 0) ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "service still in 'activating' state, bailing out~n"),
+ ok;
+sd_wait_activation(Port, Unit, AttemptsLeft) ->
+ Ret = os:cmd("systemctl show --property=ActiveState -- '" ++ Unit ++ "'"),
+ case Ret of
+ "ActiveState=activating\n" ->
+ timer:sleep(1000),
+ sd_wait_activation(Port, Unit, AttemptsLeft - 1);
+ "ActiveState=" ++ _ ->
+ ok;
+ _ = Err ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "unexpected status from systemd: ~p~n", [Err]),
+ ok
+ end.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl
new file mode 100644
index 0000000000..b6b29481c7
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl
@@ -0,0 +1,228 @@
+-module(rabbit_prelaunch).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-export([run_prelaunch_first_phase/0,
+ assert_mnesia_is_stopped/0,
+ get_context/0,
+ get_stop_reason/0,
+ set_stop_reason/1,
+ clear_stop_reason/0,
+ is_initial_pass/0,
+ initial_pass_finished/0,
+ shutdown_func/1]).
+
+-ifdef(TEST).
+-export([store_context/1,
+ clear_context_cache/0]).
+-endif.
+
+-define(PT_KEY_CONTEXT, {?MODULE, context}).
+-define(PT_KEY_INITIAL_PASS, {?MODULE, initial_pass_finished}).
+-define(PT_KEY_SHUTDOWN_FUNC, {?MODULE, chained_shutdown_func}).
+-define(PT_KEY_STOP_REASON, {?MODULE, stop_reason}).
+
+run_prelaunch_first_phase() ->
+ try
+ do_run()
+ catch
+ throw:{error, _} = Error ->
+ rabbit_prelaunch_errors:log_error(Error),
+ set_stop_reason(Error),
+ rabbit_boot_state:set(stopped),
+ Error;
+ Class:Exception:Stacktrace ->
+ rabbit_prelaunch_errors:log_exception(
+ Class, Exception, Stacktrace),
+ Error = {error, Exception},
+ set_stop_reason(Error),
+ rabbit_boot_state:set(stopped),
+ Error
+ end.
+
+do_run() ->
+ %% Indicate RabbitMQ is booting.
+ clear_stop_reason(),
+ rabbit_boot_state:set(booting),
+
+ %% Configure dbg if requested.
+ rabbit_prelaunch_early_logging:enable_quick_dbg(rabbit_env:dbg_config()),
+
+ %% Setup signal handler.
+ ok = rabbit_prelaunch_sighandler:setup(),
+
+ %% We assert Mnesia is stopped before we run the prelaunch
+ %% phases.
+ %%
+ %% We need this because our cluster consistency check (in the second
+ %% phase) depends on Mnesia not being started before it has a chance
+ %% to run.
+ %%
+ %% Also, in the initial pass, we don't want Mnesia to run before
+ %% Erlang distribution is configured.
+ assert_mnesia_is_stopped(),
+
+    %% Get information to set up logging.
+ Context0 = rabbit_env:get_context_before_logging_init(),
+ ?assertMatch(#{}, Context0),
+
+ %% Setup logging for the prelaunch phase.
+ ok = rabbit_prelaunch_early_logging:setup_early_logging(Context0, true),
+
+ IsInitialPass = is_initial_pass(),
+ case IsInitialPass of
+ true ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug(
+ "== Prelaunch phase [1/2] (initial pass) =="),
+ rabbit_log_prelaunch:debug("");
+ false ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Prelaunch phase [1/2] =="),
+ rabbit_log_prelaunch:debug("")
+ end,
+ rabbit_env:log_process_env(),
+
+ %% Load rabbitmq-env.conf, redo logging setup and continue.
+ Context1 = rabbit_env:get_context_after_logging_init(Context0),
+ ?assertMatch(#{}, Context1),
+ ok = rabbit_prelaunch_early_logging:setup_early_logging(Context1, true),
+ rabbit_env:log_process_env(),
+
+ %% Complete context now that we have the final environment loaded.
+ Context2 = rabbit_env:get_context_after_reloading_env(Context1),
+ ?assertMatch(#{}, Context2),
+ store_context(Context2),
+ rabbit_env:log_context(Context2),
+ ok = setup_shutdown_func(),
+
+ Context = Context2#{initial_pass => IsInitialPass},
+
+ rabbit_env:context_to_code_path(Context),
+ rabbit_env:context_to_app_env_vars(Context),
+
+ %% 1. Erlang/OTP compatibility check.
+ ok = rabbit_prelaunch_erlang_compat:check(Context),
+
+ %% 2. Configuration check + loading.
+ ok = rabbit_prelaunch_conf:setup(Context),
+
+ %% 3. Erlang distribution check + start.
+ ok = rabbit_prelaunch_dist:setup(Context),
+
+ %% 4. Write PID file.
+ rabbit_log_prelaunch:debug(""),
+ _ = write_pid_file(Context),
+ ignore.
+
+assert_mnesia_is_stopped() ->
+ ?assertNot(lists:keymember(mnesia, 1, application:which_applications())).
+
+store_context(Context) when is_map(Context) ->
+ persistent_term:put(?PT_KEY_CONTEXT, Context).
+
+get_context() ->
+ case persistent_term:get(?PT_KEY_CONTEXT, undefined) of
+ undefined -> undefined;
+ Context -> Context#{initial_pass => is_initial_pass()}
+ end.
+
+-ifdef(TEST).
+clear_context_cache() ->
+ persistent_term:erase(?PT_KEY_CONTEXT).
+-endif.
+
+get_stop_reason() ->
+ persistent_term:get(?PT_KEY_STOP_REASON, undefined).
+
+set_stop_reason(Reason) ->
+ case get_stop_reason() of
+ undefined ->
+ rabbit_log_prelaunch:debug("Set stop reason to: ~p", [Reason]),
+ persistent_term:put(?PT_KEY_STOP_REASON, Reason);
+ _ ->
+ ok
+ end.
+
+clear_stop_reason() ->
+ persistent_term:erase(?PT_KEY_STOP_REASON).
+
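+%% The prelaunch phase may run more than once in the lifetime of the VM
+%% (for instance when the `rabbit` application is restarted); only the very
+%% first run counts as the "initial pass".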
+is_initial_pass() ->
+ not persistent_term:get(?PT_KEY_INITIAL_PASS, false).
+
+initial_pass_finished() ->
+ persistent_term:put(?PT_KEY_INITIAL_PASS, true).
+
+setup_shutdown_func() ->
+ ThisMod = ?MODULE,
+ ThisFunc = shutdown_func,
+ ExistingShutdownFunc = application:get_env(kernel, shutdown_func),
+ case ExistingShutdownFunc of
+ {ok, {ThisMod, ThisFunc}} ->
+ ok;
+ {ok, {ExistingMod, ExistingFunc}} ->
+ rabbit_log_prelaunch:debug(
+ "Setting up kernel shutdown function: ~s:~s/1 "
+ "(chained with ~s:~s/1)",
+ [ThisMod, ThisFunc, ExistingMod, ExistingFunc]),
+ ok = persistent_term:put(
+ ?PT_KEY_SHUTDOWN_FUNC,
+ ExistingShutdownFunc),
+ ok = record_kernel_shutdown_func(ThisMod, ThisFunc);
+ _ ->
+ rabbit_log_prelaunch:debug(
+ "Setting up kernel shutdown function: ~s:~s/1",
+ [ThisMod, ThisFunc]),
+ ok = record_kernel_shutdown_func(ThisMod, ThisFunc)
+ end.
+
+record_kernel_shutdown_func(Mod, Func) ->
+ application:set_env(
+ kernel, shutdown_func, {Mod, Func},
+ [{persistent, true}]).
+
+shutdown_func(Reason) ->
+ rabbit_log_prelaunch:debug(
+ "Running ~s:shutdown_func() as part of `kernel` shutdown", [?MODULE]),
+ Context = get_context(),
+ remove_pid_file(Context),
+ ChainedShutdownFunc = persistent_term:get(
+ ?PT_KEY_SHUTDOWN_FUNC,
+ undefined),
+ case ChainedShutdownFunc of
+ {ChainedMod, ChainedFunc} -> ChainedMod:ChainedFunc(Reason);
+ _ -> ok
+ end.
+
+write_pid_file(#{pid_file := PidFile}) ->
+ rabbit_log_prelaunch:debug("Writing PID file: ~s", [PidFile]),
+ case filelib:ensure_dir(PidFile) of
+ ok ->
+ OSPid = os:getpid(),
+ case file:write_file(PidFile, OSPid) of
+ ok ->
+ ok;
+ {error, Reason} = Error ->
+ rabbit_log_prelaunch:warning(
+ "Failed to write PID file \"~s\": ~s",
+ [PidFile, file:format_error(Reason)]),
+ Error
+ end;
+ {error, Reason} = Error ->
+ rabbit_log_prelaunch:warning(
+ "Failed to create PID file \"~s\" directory: ~s",
+ [PidFile, file:format_error(Reason)]),
+ Error
+ end;
+write_pid_file(_) ->
+ ok.
+
+remove_pid_file(#{pid_file := PidFile, keep_pid_file_on_exit := true}) ->
+ rabbit_log_prelaunch:debug("Keeping PID file: ~s", [PidFile]),
+ ok;
+remove_pid_file(#{pid_file := PidFile}) ->
+ rabbit_log_prelaunch:debug("Deleting PID file: ~s", [PidFile]),
+ _ = file:delete(PidFile),
+ ok;
+remove_pid_file(_) ->
+ ok.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl
new file mode 100644
index 0000000000..cef7f05e77
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl
@@ -0,0 +1,11 @@
+-module(rabbit_prelaunch_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ rabbit_prelaunch_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl
new file mode 100644
index 0000000000..fbbae7a185
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl
@@ -0,0 +1,520 @@
+-module(rabbit_prelaunch_conf).
+
+-include_lib("kernel/include/file.hrl").
+-include_lib("stdlib/include/zip.hrl").
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([setup/1,
+ get_config_state/0,
+ generate_config_from_cuttlefish_files/3,
+ decrypt_config/1]).
+
+-ifdef(TEST).
+-export([decrypt_config/2]).
+-endif.
+
+setup(Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Configuration =="),
+
+ %% TODO: Check if directories/files are inside Mnesia dir.
+
+ ok = set_default_config(),
+
+ AdditionalConfigFiles = find_additional_config_files(Context),
+ AdvancedConfigFile = find_actual_advanced_config_file(Context),
+ State = case find_actual_main_config_file(Context) of
+ {MainConfigFile, erlang} ->
+ Config = load_cuttlefish_config_file(Context,
+ AdditionalConfigFiles,
+ MainConfigFile),
+ Apps = [App || {App, _} <- Config],
+ decrypt_config(Apps),
+ #{config_files => AdditionalConfigFiles,
+ config_advanced_file => MainConfigFile};
+ {MainConfigFile, cuttlefish} ->
+ ConfigFiles = [MainConfigFile | AdditionalConfigFiles],
+ Config = load_cuttlefish_config_file(Context,
+ ConfigFiles,
+ AdvancedConfigFile),
+ Apps = [App || {App, _} <- Config],
+ decrypt_config(Apps),
+ #{config_files => ConfigFiles,
+ config_advanced_file => AdvancedConfigFile};
+ undefined when AdditionalConfigFiles =/= [] ->
+ ConfigFiles = AdditionalConfigFiles,
+ Config = load_cuttlefish_config_file(Context,
+ ConfigFiles,
+ AdvancedConfigFile),
+ Apps = [App || {App, _} <- Config],
+ decrypt_config(Apps),
+ #{config_files => ConfigFiles,
+ config_advanced_file => AdvancedConfigFile};
+ undefined when AdvancedConfigFile =/= undefined ->
+ rabbit_log_prelaunch:warning(
+ "Using RABBITMQ_ADVANCED_CONFIG_FILE: ~s",
+ [AdvancedConfigFile]),
+ Config = load_cuttlefish_config_file(Context,
+ AdditionalConfigFiles,
+ AdvancedConfigFile),
+ Apps = [App || {App, _} <- Config],
+ decrypt_config(Apps),
+ #{config_files => AdditionalConfigFiles,
+ config_advanced_file => AdvancedConfigFile};
+ undefined ->
+ #{config_files => [],
+ config_advanced_file => undefined}
+ end,
+ ok = override_with_hard_coded_critical_config(),
+ ok = set_credentials_obfuscation_secret(),
+ rabbit_log_prelaunch:debug(
+ "Saving config state to application env: ~p", [State]),
+ store_config_state(State).
+
+store_config_state(ConfigState) ->
+ persistent_term:put({rabbitmq_prelaunch, config_state}, ConfigState).
+
+get_config_state() ->
+ persistent_term:get({rabbitmq_prelaunch, config_state}, undefined).
+
+%% -------------------------------------------------------------------
+%% Configuration loading.
+%% -------------------------------------------------------------------
+
+set_default_config() ->
+ rabbit_log_prelaunch:debug("Setting default config"),
+ Config = [
+ {ra,
+ [
+        {wal_max_size_bytes, 536870912}, %% 512 MiB (2 ^ 29)
+ {wal_max_batch_size, 4096}
+ ]},
+ {aten,
+ [
+        %% A greater poll interval has been shown to trigger fewer
+        %% false-positive leader elections in quorum queues. The cost is a
+        %% slightly longer detection time when a genuine network issue
+        %% occurs. Ra still uses Erlang monitors, of course, so whenever a
+        %% connection goes down it is still detected immediately.
+ {poll_interval, 5000}
+ ]},
+ {sysmon_handler,
+ [{process_limit, 100},
+ {port_limit, 100},
+ {gc_ms_limit, 0},
+ {schedule_ms_limit, 0},
+ {heap_word_limit, 0},
+ {busy_port, false},
+ {busy_dist_port, true}]}
+ ],
+ apply_erlang_term_based_config(Config).
+
+find_actual_main_config_file(#{main_config_file := File}) ->
+ case filelib:is_regular(File) of
+ true ->
+ Format = case filename:extension(File) of
+ ".conf" -> cuttlefish;
+ ".config" -> erlang;
+ _ -> determine_config_format(File)
+ end,
+ {File, Format};
+ false ->
+ OldFormatFile = File ++ ".config",
+ NewFormatFile = File ++ ".conf",
+ case filelib:is_regular(OldFormatFile) of
+ true ->
+ case filelib:is_regular(NewFormatFile) of
+ true ->
+ rabbit_log_prelaunch:warning(
+ "Both old (.config) and new (.conf) format "
+ "config files exist."),
+ rabbit_log_prelaunch:warning(
+ "Using the old format config file: ~s",
+ [OldFormatFile]),
+ rabbit_log_prelaunch:warning(
+ "Please update your config files to the new "
+ "format and remove the old file."),
+ ok;
+ false ->
+ ok
+ end,
+ {OldFormatFile, erlang};
+ false ->
+ case filelib:is_regular(NewFormatFile) of
+ true -> {NewFormatFile, cuttlefish};
+ false -> undefined
+ end
+ end
+ end.
+
+find_additional_config_files(#{additional_config_files := Pattern})
+ when Pattern =/= undefined ->
+ Pattern1 = case filelib:is_dir(Pattern) of
+ true -> filename:join(Pattern, "*");
+ false -> Pattern
+ end,
+ OnlyFiles = [File ||
+ File <- filelib:wildcard(Pattern1),
+ filelib:is_regular(File)],
+ lists:sort(OnlyFiles);
+find_additional_config_files(_) ->
+ [].
+
+find_actual_advanced_config_file(#{advanced_config_file := File}) ->
+ case filelib:is_regular(File) of
+ true -> File;
+ false -> undefined
+ end.
+
+determine_config_format(File) ->
+ case filelib:file_size(File) of
+ 0 ->
+ cuttlefish;
+ _ ->
+ case file:consult(File) of
+ {ok, _} -> erlang;
+ _ -> cuttlefish
+ end
+ end.
+
+load_cuttlefish_config_file(Context,
+ ConfigFiles,
+ AdvancedConfigFile) ->
+ Config = generate_config_from_cuttlefish_files(
+ Context, ConfigFiles, AdvancedConfigFile),
+ apply_erlang_term_based_config(Config),
+ Config.
+
+generate_config_from_cuttlefish_files(Context,
+ ConfigFiles,
+ AdvancedConfigFile) ->
+ %% Load schemas.
+ SchemaFiles = find_cuttlefish_schemas(Context),
+ case SchemaFiles of
+ [] ->
+ rabbit_log_prelaunch:error(
+ "No configuration schema found~n", []),
+ throw({error, no_configuration_schema_found});
+ _ ->
+ rabbit_log_prelaunch:debug(
+ "Configuration schemas found:~n", []),
+ lists:foreach(
+ fun(SchemaFile) ->
+ rabbit_log_prelaunch:debug(" - ~ts", [SchemaFile])
+ end,
+ SchemaFiles),
+ ok
+ end,
+ Schema = cuttlefish_schema:files(SchemaFiles),
+
+ %% Load configuration.
+ rabbit_log_prelaunch:debug(
+ "Loading configuration files (Cuttlefish based):"),
+ lists:foreach(
+ fun(ConfigFile) ->
+ rabbit_log_prelaunch:debug(" - ~ts", [ConfigFile])
+ end, ConfigFiles),
+ case cuttlefish_conf:files(ConfigFiles) of
+ {errorlist, Errors} ->
+ rabbit_log_prelaunch:error("Error parsing configuration:"),
+ lists:foreach(
+ fun(Error) ->
+ rabbit_log_prelaunch:error(
+ " - ~ts",
+ [cuttlefish_error:xlate(Error)])
+ end, Errors),
+ rabbit_log_prelaunch:error(
+ "Are these files using the Cuttlefish format?"),
+ throw({error, failed_to_parse_configuration_file});
+ Config0 ->
+ %% Finalize configuration, based on the schema.
+ Config = case cuttlefish_generator:map(Schema, Config0) of
+ {error, Phase, {errorlist, Errors}} ->
+ %% TODO
+ rabbit_log_prelaunch:error(
+ "Error preparing configuration in phase ~ts:",
+ [Phase]),
+ lists:foreach(
+ fun(Error) ->
+ rabbit_log_prelaunch:error(
+ " - ~ts",
+ [cuttlefish_error:xlate(Error)])
+ end, Errors),
+ throw(
+ {error, failed_to_prepare_configuration});
+ ValidConfig ->
+ proplists:delete(vm_args, ValidConfig)
+ end,
+
+ %% Apply advanced configuration overrides, if any.
+ override_with_advanced_config(Config, AdvancedConfigFile)
+ end.
+
+find_cuttlefish_schemas(Context) ->
+ Apps = list_apps(Context),
+ rabbit_log_prelaunch:debug(
+ "Looking up configuration schemas in the following applications:"),
+ find_cuttlefish_schemas(Apps, []).
+
+find_cuttlefish_schemas([App | Rest], AllSchemas) ->
+ Schemas = list_schemas_in_app(App),
+ find_cuttlefish_schemas(Rest, AllSchemas ++ Schemas);
+find_cuttlefish_schemas([], AllSchemas) ->
+ lists:sort(fun(A,B) -> A < B end, AllSchemas).
+
+list_apps(#{os_type := {win32, _}, plugins_path := PluginsPath}) ->
+ PluginsDirs = string:lexemes(PluginsPath, ";"),
+ list_apps1(PluginsDirs, []);
+list_apps(#{plugins_path := PluginsPath}) ->
+ PluginsDirs = string:lexemes(PluginsPath, ":"),
+ list_apps1(PluginsDirs, []).
+
+
+list_apps1([Dir | Rest], Apps) ->
+ case file:list_dir(Dir) of
+ {ok, Filenames} ->
+ NewApps = [list_to_atom(
+ hd(
+                           string:split(filename:basename(F, ".ez"), "-")))
+ || F <- Filenames],
+ Apps1 = lists:umerge(Apps, lists:sort(NewApps)),
+ list_apps1(Rest, Apps1);
+ {error, Reason} ->
+ rabbit_log_prelaunch:debug(
+ "Failed to list directory \"~ts\" content: ~ts",
+ [Dir, file:format_error(Reason)]),
+ list_apps1(Rest, Apps)
+ end;
+list_apps1([], AppInfos) ->
+ AppInfos.
+
+list_schemas_in_app(App) ->
+ {Loaded, Unload} = case application:load(App) of
+ ok -> {true, true};
+ {error, {already_loaded, _}} -> {true, false};
+ {error, Reason} -> {Reason, false}
+ end,
+ List = case Loaded of
+ true ->
+ case code:priv_dir(App) of
+ {error, bad_name} ->
+ rabbit_log_prelaunch:debug(
+ " [ ] ~s (no readable priv dir)", [App]),
+ [];
+ PrivDir ->
+ SchemaDir = filename:join([PrivDir, "schema"]),
+ do_list_schemas_in_app(App, SchemaDir)
+ end;
+ Reason1 ->
+ rabbit_log_prelaunch:debug(
+ " [ ] ~s (failed to load application: ~p)",
+ [App, Reason1]),
+ []
+ end,
+ case Unload of
+ true -> _ = application:unload(App),
+ ok;
+ false -> ok
+ end,
+ List.
+
+do_list_schemas_in_app(App, SchemaDir) ->
+ case erl_prim_loader:list_dir(SchemaDir) of
+ {ok, Files} ->
+ rabbit_log_prelaunch:debug(" [x] ~s", [App]),
+ [filename:join(SchemaDir, File)
+ || [C | _] = File <- Files,
+ C =/= $.];
+ error ->
+ rabbit_log_prelaunch:debug(
+ " [ ] ~s (no readable schema dir)", [App]),
+ []
+ end.
+
+override_with_advanced_config(Config, undefined) ->
+ Config;
+override_with_advanced_config(Config, AdvancedConfigFile) ->
+ rabbit_log_prelaunch:debug(
+ "Override with advanced configuration file \"~ts\"",
+ [AdvancedConfigFile]),
+ case file:consult(AdvancedConfigFile) of
+ {ok, [AdvancedConfig]} ->
+ cuttlefish_advanced:overlay(Config, AdvancedConfig);
+ {ok, OtherTerms} ->
+ rabbit_log_prelaunch:error(
+ "Failed to load advanced configuration file \"~ts\", "
+ "incorrect format: ~p",
+ [AdvancedConfigFile, OtherTerms]),
+ throw({error, failed_to_parse_advanced_configuration_file});
+ {error, Reason} ->
+ rabbit_log_prelaunch:error(
+ "Failed to load advanced configuration file \"~ts\": ~ts",
+ [AdvancedConfigFile, file:format_error(Reason)]),
+ throw({error, failed_to_read_advanced_configuration_file})
+ end.
+
+override_with_hard_coded_critical_config() ->
+ rabbit_log_prelaunch:debug("Override with hard-coded critical config"),
+ Config = [
+ {ra,
+ %% Make Ra use a custom logger that dispatches to lager
+ %% instead of the default OTP logger
+ [{logger_module, rabbit_log_ra_shim}]},
+ {osiris,
+ [{logger_module, rabbit_log_osiris_shim}]}
+ ],
+ apply_erlang_term_based_config(Config).
+
+apply_erlang_term_based_config([{_, []} | Rest]) ->
+ apply_erlang_term_based_config(Rest);
+apply_erlang_term_based_config([{App, Vars} | Rest]) ->
+ rabbit_log_prelaunch:debug(" Applying configuration for '~s':", [App]),
+ ok = apply_app_env_vars(App, Vars),
+ apply_erlang_term_based_config(Rest);
+apply_erlang_term_based_config([]) ->
+ ok.
+
+apply_app_env_vars(App, [{Var, Value} | Rest]) ->
+ rabbit_log_prelaunch:debug(" - ~s = ~p", [Var, Value]),
+ ok = application:set_env(App, Var, Value, [{persistent, true}]),
+ apply_app_env_vars(App, Rest);
+apply_app_env_vars(_, []) ->
+ ok.
+
+set_credentials_obfuscation_secret() ->
+ rabbit_log_prelaunch:debug(
+ "Refreshing credentials obfuscation configuration from env: ~p",
+ [application:get_all_env(credentials_obfuscation)]),
+ ok = credentials_obfuscation:refresh_config(),
+ CookieBin = rabbit_data_coercion:to_binary(erlang:get_cookie()),
+ rabbit_log_prelaunch:debug(
+ "Setting credentials obfuscation secret to '~s'", [CookieBin]),
+ ok = credentials_obfuscation:set_secret(CookieBin).
+
+%% -------------------------------------------------------------------
+%% Config decryption.
+%% -------------------------------------------------------------------
+
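+%% Encrypted values appear in the configuration as `{encrypted, Binary}'
+%% tuples; `config_entry_decoder' is a proplist carrying the passphrase (or
+%% how to obtain it) and, optionally, the cipher, hash and iteration count
+%% used to decrypt them.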
+decrypt_config(Apps) ->
+ rabbit_log_prelaunch:debug("Decoding encrypted config values (if any)"),
+ ConfigEntryDecoder = application:get_env(rabbit, config_entry_decoder, []),
+ decrypt_config(Apps, ConfigEntryDecoder).
+
+decrypt_config([], _) ->
+ ok;
+decrypt_config([App | Apps], Algo) ->
+ Algo1 = decrypt_app(App, application:get_all_env(App), Algo),
+ decrypt_config(Apps, Algo1).
+
+decrypt_app(_, [], Algo) ->
+ Algo;
+decrypt_app(App, [{Key, Value} | Tail], Algo) ->
+ Algo2 = try
+ case decrypt(Value, Algo) of
+ {Value, Algo1} ->
+ Algo1;
+ {NewValue, Algo1} ->
+ rabbit_log_prelaunch:debug(
+ "Value of `~s` decrypted", [Key]),
+ ok = application:set_env(App, Key, NewValue,
+ [{persistent, true}]),
+ Algo1
+ end
+ catch
+ throw:{bad_config_entry_decoder, _} = Error ->
+ throw(Error);
+ _:Msg ->
+ throw({config_decryption_error, {key, Key}, Msg})
+ end,
+ decrypt_app(App, Tail, Algo2).
+
+decrypt({encrypted, _} = EncValue,
+ {Cipher, Hash, Iterations, PassPhrase} = Algo) ->
+ {rabbit_pbe:decrypt_term(Cipher, Hash, Iterations, PassPhrase, EncValue),
+ Algo};
+decrypt({encrypted, _} = EncValue,
+ ConfigEntryDecoder)
+ when is_list(ConfigEntryDecoder) ->
+ Algo = config_entry_decoder_to_algo(ConfigEntryDecoder),
+ decrypt(EncValue, Algo);
+decrypt(List, Algo) when is_list(List) ->
+ decrypt_list(List, Algo, []);
+decrypt(Value, Algo) ->
+ {Value, Algo}.
+
+%% We make no distinction between strings and other lists.
+%% When we receive a string, we loop through each element
+%% and ultimately return the string unmodified, as intended.
+decrypt_list([], Algo, Acc) ->
+ {lists:reverse(Acc), Algo};
+decrypt_list([{Key, Value} | Tail], Algo, Acc)
+ when Key =/= encrypted ->
+ {Value1, Algo1} = decrypt(Value, Algo),
+ decrypt_list(Tail, Algo1, [{Key, Value1} | Acc]);
+decrypt_list([Value | Tail], Algo, Acc) ->
+ {Value1, Algo1} = decrypt(Value, Algo),
+ decrypt_list(Tail, Algo1, [Value1 | Acc]).
+
+config_entry_decoder_to_algo(ConfigEntryDecoder) ->
+ case get_passphrase(ConfigEntryDecoder) of
+ undefined ->
+ throw({bad_config_entry_decoder, missing_passphrase});
+ PassPhrase ->
+ {
+ proplists:get_value(
+ cipher, ConfigEntryDecoder, rabbit_pbe:default_cipher()),
+ proplists:get_value(
+ hash, ConfigEntryDecoder, rabbit_pbe:default_hash()),
+ proplists:get_value(
+ iterations, ConfigEntryDecoder,
+ rabbit_pbe:default_iterations()),
+ PassPhrase
+ }
+ end.
+
+get_passphrase(ConfigEntryDecoder) ->
+ rabbit_log_prelaunch:debug("Getting encrypted config passphrase"),
+ case proplists:get_value(passphrase, ConfigEntryDecoder) of
+ prompt ->
+ IoDevice = get_input_iodevice(),
+ ok = io:setopts(IoDevice, [{echo, false}]),
+ PP = lists:droplast(io:get_line(IoDevice,
+ "\nPlease enter the passphrase to unlock encrypted "
+ "configuration entries.\n\nPassphrase: ")),
+ ok = io:setopts(IoDevice, [{echo, true}]),
+ io:format(IoDevice, "~n", []),
+ PP;
+ {file, Filename} ->
+ {ok, File} = file:read_file(Filename),
+ [PP|_] = binary:split(File, [<<"\r\n">>, <<"\n">>]),
+ PP;
+ PP ->
+ PP
+ end.
+
+%% This function retrieves the correct IoDevice for requesting
+%% input. The problem with using the default IoDevice is that
+%% the Erlang shell prevents us from getting the input.
+%%
+%% Instead we therefore look for the io process used by the
+%% shell and if it can't be found (because the shell is not
+%% started e.g with -noshell) we use the 'user' process.
+%%
+%% This function will not work when either -oldshell or -noinput
+%% options are passed to erl.
+get_input_iodevice() ->
+ case whereis(user) of
+ undefined ->
+ user;
+ User ->
+ case group:interfaces(User) of
+ [] ->
+ user;
+ [{user_drv, Drv}] ->
+ case user_drv:interfaces(Drv) of
+ [] -> user;
+ [{current_group, IoDevice}] -> IoDevice
+ end
+ end
+ end.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl
new file mode 100644
index 0000000000..3d718438a7
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl
@@ -0,0 +1,104 @@
+-module(rabbit_prelaunch_dist).
+
+-export([setup/1]).
+
+setup(#{nodename := Node, nodename_type := NameType} = Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Erlang distribution =="),
+    rabbit_log_prelaunch:debug("Requested node name: ~s (type: ~s)",
+ [Node, NameType]),
+ case node() of
+ nonode@nohost ->
+ ok = rabbit_nodes_common:ensure_epmd(),
+ ok = dist_port_range_check(Context),
+ ok = dist_port_use_check(Context),
+ ok = duplicate_node_check(Context),
+
+ ok = do_setup(Context);
+ Node ->
+ rabbit_log_prelaunch:debug(
+ "Erlang distribution already running", []),
+ ok;
+ Unexpected ->
+ throw({error, {erlang_dist_running_with_unexpected_nodename,
+ Unexpected, Node}})
+ end,
+ ok.
+
+do_setup(#{nodename := Node, nodename_type := NameType}) ->
+ rabbit_log_prelaunch:debug("Starting Erlang distribution", []),
+ case application:get_env(kernel, net_ticktime) of
+ {ok, Ticktime} when is_integer(Ticktime) andalso Ticktime >= 1 ->
+ %% The value passed to net_kernel:start/1 is the
+ %% "minimum transition traffic interval" as defined in
+ %% net_kernel:set_net_ticktime/1.
+ MTTI = Ticktime * 1000 div 4,
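+            %% For example, a configured net_ticktime of 60 seconds yields
+            %% an MTTI of 15000 ms.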
+ {ok, _} = net_kernel:start([Node, NameType, MTTI]),
+ ok;
+ _ ->
+ {ok, _} = net_kernel:start([Node, NameType]),
+ ok
+ end,
+ ok.
+
+%% Check whether a node with the same name is already running
+duplicate_node_check(#{split_nodename := {NodeName, NodeHost}}) ->
+ rabbit_log_prelaunch:debug(
+ "Checking if node name ~s is already used", [NodeName]),
+ PrelaunchName = rabbit_nodes_common:make(
+ {NodeName ++ "_prelaunch_" ++ os:getpid(),
+ "localhost"}),
+ {ok, _} = net_kernel:start([PrelaunchName, shortnames]),
+ case rabbit_nodes_common:names(NodeHost) of
+ {ok, NamePorts} ->
+ case proplists:is_defined(NodeName, NamePorts) of
+ true ->
+ throw({error, {duplicate_node_name, NodeName, NodeHost}});
+ false ->
+ ok = net_kernel:stop(),
+ ok
+ end;
+ {error, EpmdReason} ->
+ throw({error, {epmd_error, NodeHost, EpmdReason}})
+ end.
+
+dist_port_range_check(#{erlang_dist_tcp_port := DistTcpPort}) ->
+ rabbit_log_prelaunch:debug(
+ "Checking if TCP port ~b is valid", [DistTcpPort]),
+ case DistTcpPort of
+ _ when DistTcpPort < 1 orelse DistTcpPort > 65535 ->
+ throw({error, {invalid_dist_port_range, DistTcpPort}});
+ _ ->
+ ok
+ end.
+
+dist_port_use_check(#{split_nodename := {_, NodeHost},
+ erlang_dist_tcp_port := DistTcpPort}) ->
+ rabbit_log_prelaunch:debug(
+ "Checking if TCP port ~b is available", [DistTcpPort]),
+ dist_port_use_check_ipv4(NodeHost, DistTcpPort).
+
+dist_port_use_check_ipv4(NodeHost, Port) ->
+ case gen_tcp:listen(Port, [inet, {reuseaddr, true}]) of
+ {ok, Sock} -> gen_tcp:close(Sock);
+ {error, einval} -> dist_port_use_check_ipv6(NodeHost, Port);
+ {error, _} -> dist_port_use_check_fail(Port, NodeHost)
+ end.
+
+dist_port_use_check_ipv6(NodeHost, Port) ->
+ case gen_tcp:listen(Port, [inet6, {reuseaddr, true}]) of
+ {ok, Sock} -> gen_tcp:close(Sock);
+ {error, _} -> dist_port_use_check_fail(Port, NodeHost)
+ end.
+
+-spec dist_port_use_check_fail(non_neg_integer(), string()) ->
+ no_return().
+
+dist_port_use_check_fail(Port, Host) ->
+ {ok, Names} = rabbit_nodes_common:names(Host),
+ case [N || {N, P} <- Names, P =:= Port] of
+ [] ->
+ throw({error, {dist_port_already_used, Port, not_erlang, Host}});
+ [Name] ->
+ throw({error, {dist_port_already_used, Port, Name, Host}})
+ end.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl
new file mode 100644
index 0000000000..4e371c76ae
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl
@@ -0,0 +1,115 @@
+-module(rabbit_prelaunch_early_logging).
+
+-include_lib("rabbit_common/include/rabbit_log.hrl").
+
+-export([setup_early_logging/2,
+ enable_quick_dbg/1,
+ use_colored_logging/0,
+ use_colored_logging/1,
+ list_expected_sinks/0]).
+
+setup_early_logging(#{log_levels := undefined} = Context,
+ LagerEventToStdout) ->
+ setup_early_logging(Context#{log_levels => get_default_log_level()},
+ LagerEventToStdout);
+setup_early_logging(Context, LagerEventToStdout) ->
+ Configured = lists:member(
+ lager_util:make_internal_sink_name(rabbit_log_prelaunch),
+ lager:list_all_sinks()),
+ case Configured of
+ true -> ok;
+ false -> do_setup_early_logging(Context, LagerEventToStdout)
+ end.
+
+get_default_log_level() ->
+ #{"prelaunch" => warning}.
+
+do_setup_early_logging(#{log_levels := LogLevels} = Context,
+ LagerEventToStdout) ->
+ redirect_logger_messages_to_lager(),
+ Colored = use_colored_logging(Context),
+ application:set_env(lager, colored, Colored),
+ ConsoleBackend = lager_console_backend,
+ case LagerEventToStdout of
+ true ->
+ GLogLevel = case LogLevels of
+ #{global := Level} -> Level;
+ _ -> warning
+ end,
+ _ = lager_app:start_handler(
+ lager_event, ConsoleBackend, [{level, GLogLevel}]),
+ ok;
+ false ->
+ ok
+ end,
+ lists:foreach(
+ fun(Sink) ->
+ CLogLevel = get_log_level(LogLevels, Sink),
+ lager_app:configure_sink(
+ Sink,
+ [{handlers, [{ConsoleBackend, [{level, CLogLevel}]}]}])
+ end, list_expected_sinks()),
+ ok.
+
+redirect_logger_messages_to_lager() ->
+ io:format(standard_error, "Configuring logger redirection~n", []),
+ ok = logger:add_handler(rabbit_log, rabbit_log, #{}),
+ ok = logger:set_primary_config(level, all).
+
+use_colored_logging() ->
+ use_colored_logging(rabbit_prelaunch:get_context()).
+
+use_colored_logging(#{log_levels := #{color := true},
+ output_supports_colors := true}) ->
+ true;
+use_colored_logging(_) ->
+ false.
+
+list_expected_sinks() ->
+ Key = {?MODULE, lager_extra_sinks},
+ case persistent_term:get(Key, undefined) of
+ undefined ->
+ CompileOptions = proplists:get_value(options,
+ module_info(compile),
+ []),
+ AutoList = [lager_util:make_internal_sink_name(M)
+ || M <- proplists:get_value(lager_extra_sinks,
+ CompileOptions, [])],
+ List = case lists:member(?LAGER_SINK, AutoList) of
+ true -> AutoList;
+ false -> [?LAGER_SINK | AutoList]
+ end,
+            %% Store the list as a persistent term. If this module is
+            %% later cover-compiled, the compile option will be lost, so
+            %% we will still be able to retrieve the list from the
+            %% persistent term.
+ persistent_term:put(Key, List),
+ List;
+ List ->
+ List
+ end.
+
+sink_to_category(Sink) when is_atom(Sink) ->
+ re:replace(
+ atom_to_list(Sink),
+ "^rabbit_log_(.+)_lager_event$",
+ "\\1",
+ [{return, list}]).
+
+get_log_level(LogLevels, Sink) ->
+ Category = sink_to_category(Sink),
+ case LogLevels of
+ #{Category := Level} -> Level;
+ #{global := Level} -> Level;
+ _ -> warning
+ end.
+
+enable_quick_dbg(#{dbg_output := Output, dbg_mods := Mods}) ->
+ case Output of
+ stdout -> {ok, _} = dbg:tracer(),
+ ok;
+ _ -> {ok, _} = dbg:tracer(port, dbg:trace_port(file, Output)),
+ ok
+ end,
+ {ok, _} = dbg:p(all, c),
+ lists:foreach(fun(M) -> {ok, _} = dbg:tp(M, cx) end, Mods).
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl
new file mode 100644
index 0000000000..1e8fe2690d
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl
@@ -0,0 +1,47 @@
+-module(rabbit_prelaunch_erlang_compat).
+
+-export([check/1]).
+
+-define(OTP_MINIMUM, "21.3").
+-define(ERTS_MINIMUM, "10.3").
+
+check(_Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Erlang/OTP compatibility check =="),
+
+ ERTSVer = erlang:system_info(version),
+ OTPRel = rabbit_misc:otp_release(),
+ rabbit_log_prelaunch:debug(
+ "Requiring: Erlang/OTP ~s (ERTS ~s)", [?OTP_MINIMUM, ?ERTS_MINIMUM]),
+ rabbit_log_prelaunch:debug(
+ "Running: Erlang/OTP ~s (ERTS ~s)", [OTPRel, ERTSVer]),
+
+ case rabbit_misc:version_compare(?ERTS_MINIMUM, ERTSVer, lte) of
+ true when ?ERTS_MINIMUM =/= ERTSVer ->
+ rabbit_log_prelaunch:debug(
+ "Erlang/OTP version requirement satisfied"),
+ ok;
+ true when ?ERTS_MINIMUM =:= ERTSVer andalso ?OTP_MINIMUM =< OTPRel ->
+ %% When a critical regression or bug is found, a new OTP
+ %% release can be published without changing the ERTS
+ %% version. For instance, this is the case with R16B03 and
+ %% R16B03-1.
+ %%
+ %% In this case, we compare the release versions
+ %% alphabetically.
+ ok;
+ _ ->
+ Msg =
+ "This RabbitMQ version cannot run on Erlang ~s (erts ~s): "
+ "minimum required version is ~s (erts ~s)",
+ Args = [OTPRel, ERTSVer, ?OTP_MINIMUM, ?ERTS_MINIMUM],
+ rabbit_log_prelaunch:error(Msg, Args),
+
+ %% Also print to stderr to make this more visible
+ io:format(standard_error, "Error: " ++ Msg ++ "~n", Args),
+
+ Msg2 = rabbit_misc:format(
+ "Erlang ~s or later is required, started on ~s",
+ [?OTP_MINIMUM, OTPRel]),
+ throw({error, {erlang_version_too_old, Msg2}})
+ end.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl
new file mode 100644
index 0000000000..b2cc03d069
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl
@@ -0,0 +1,114 @@
+-module(rabbit_prelaunch_errors).
+
+-export([format_error/1,
+ format_exception/3,
+ log_error/1,
+ log_exception/3]).
+
+-define(BOOT_FAILED_HEADER,
+ "\n"
+ "BOOT FAILED\n"
+ "===========\n").
+
+-define(BOOT_FAILED_FOOTER,
+ "\n").
+
+log_error(Error) ->
+ Message = format_error(Error),
+ log_message(Message).
+
+format_error({error, {duplicate_node_name, NodeName, NodeHost}}) ->
+ rabbit_misc:format(
+ "ERROR: node with name ~p is already running on host ~p",
+ [NodeName, NodeHost]);
+format_error({error, {epmd_error, NodeHost, EpmdReason}}) ->
+ rabbit_misc:format(
+ "ERROR: epmd error for host ~s: ~s",
+ [NodeHost, rabbit_misc:format_inet_error(EpmdReason)]);
+format_error({error, {invalid_dist_port_range, DistTcpPort}}) ->
+ rabbit_misc:format(
+ "Invalid Erlang distribution TCP port: ~b", [DistTcpPort]);
+format_error({error, {dist_port_already_used, Port, not_erlang, Host}}) ->
+ rabbit_misc:format(
+ "ERROR: could not bind to distribution port ~b on host ~s. It could "
+ "be in use by another process or cannot be bound to (e.g. due to a "
+ "security policy)", [Port, Host]);
+format_error({error, {dist_port_already_used, Port, Name, Host}}) ->
+ rabbit_misc:format(
+ "ERROR: could not bind to distribution port ~b, it is in use by "
+ "another node: ~s@~s", [Port, Name, Host]);
+format_error({error, {erlang_dist_running_with_unexpected_nodename,
+ Unexpected, Node}}) ->
+ rabbit_misc:format(
+ "Erlang distribution running with another node name (~s) "
+ "than the configured one (~s)",
+ [Unexpected, Node]);
+format_error({bad_config_entry_decoder, missing_passphrase}) ->
+ rabbit_misc:format(
+ "Missing passphrase or missing passphrase read method in "
+ "`config_entry_decoder`", []);
+format_error({config_decryption_error, {key, Key}, _Msg}) ->
+ rabbit_misc:format(
+ "Error while decrypting key '~p'. Please check encrypted value, "
+ "passphrase, and encryption configuration~n",
+ [Key]);
+format_error({error, {timeout_waiting_for_tables, AllNodes, _}}) ->
+ Suffix =
+ "~nBACKGROUND~n==========~n~n"
+ "This cluster node was shut down while other nodes were still running.~n"
+ "To avoid losing data, you should start the other nodes first, then~n"
+ "start this one. To force this node to start, first invoke~n"
+ "\"rabbitmqctl force_boot\". If you do so, any changes made on other~n"
+ "cluster nodes after this one was shut down may be lost.",
+ {Message, Nodes} =
+ case AllNodes -- [node()] of
+ [] -> {rabbit_misc:format(
+ "Timeout contacting cluster nodes. Since RabbitMQ was"
+ " shut down forcefully~nit cannot determine which nodes"
+ " are timing out.~n" ++ Suffix, []),
+ []};
+ Ns -> {rabbit_misc:format(
+ "Timeout contacting cluster nodes: ~p.~n" ++ Suffix,
+ [Ns]),
+ Ns}
+ end,
+ Message ++ "\n" ++ rabbit_nodes_common:diagnostics(Nodes);
+format_error({error, {cannot_log_to_file, unknown, Reason}}) ->
+ rabbit_misc:format(
+      "failed to initialise logger: ~p~n",
+ [Reason]);
+format_error({error, {cannot_log_to_file, LogFile,
+ {cannot_create_parent_dirs, _, Reason}}}) ->
+ rabbit_misc:format(
+ "failed to create parent directory for log file at '~s', reason: ~s~n",
+ [LogFile, file:format_error(Reason)]);
+format_error({error, {cannot_log_to_file, LogFile, Reason}}) ->
+ rabbit_misc:format(
+ "failed to open log file at '~s', reason: ~s",
+ [LogFile, file:format_error(Reason)]);
+format_error(Error) ->
+ rabbit_misc:format("Error during startup: ~p", [Error]).
+
+log_exception(Class, Exception, Stacktrace) ->
+ Message = format_exception(Class, Exception, Stacktrace),
+ log_message(Message).
+
+format_exception(Class, Exception, Stacktrace) ->
+ rabbit_misc:format(
+ "Exception during startup:~n~s",
+ [lager:pr_stacktrace(Stacktrace, {Class, Exception})]).
+
+log_message(Message) ->
+ Lines = string:split(
+ ?BOOT_FAILED_HEADER ++
+ Message ++
+ ?BOOT_FAILED_FOOTER,
+ [$\n],
+ all),
+ lists:foreach(
+ fun(Line) ->
+ rabbit_log_prelaunch:error("~s", [Line]),
+ io:format(standard_error, "~s~n", [Line])
+ end, Lines),
+ timer:sleep(1000),
+ ok.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl
new file mode 100644
index 0000000000..f9a60effda
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl
@@ -0,0 +1,93 @@
+-module(rabbit_prelaunch_sighandler).
+-behaviour(gen_event).
+
+-export([setup/0,
+ init/1,
+ handle_event/2,
+ handle_call/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+%% CAUTION: Signal handling in this module must be kept consistent
+%% with the same handling in rabbitmq-server(8).
+
+%% #{signal => default | ignore | stop}.
+-define(SIGNALS_HANDLED_BY_US,
+ #{
+ %% SIGHUP is often used to reload the configuration or reopen
+          %% log files after they were rotated. We support neither of
+          %% those two cases, so ignore it for now, until we can do
+          %% something about it.
+ sighup => ignore,
+
+ %% SIGTSTP is triggered by Ctrl+Z to pause a program. However
+ %% we can't handle SIGCONT, the signal used to resume the
+ %% program. Unfortunately, it makes a SIGTSTP handler less
+ %% useful here.
+ sigtstp => ignore
+ }).
+
+-define(SIGNAL_HANDLED_BY_ERLANG(Signal),
+ Signal =:= sigusr1 orelse
+ Signal =:= sigquit orelse
+ Signal =:= sigterm).
+
+-define(SERVER, erl_signal_server).
+
+setup() ->
+ case os:type() of
+ {unix, _} ->
+ case whereis(?SERVER) of
+ undefined ->
+ ok;
+ _ ->
+ case lists:member(?MODULE, gen_event:which_handlers(?SERVER)) of
+ true -> ok;
+ false -> gen_event:add_handler(?SERVER, ?MODULE, [])
+ end
+ end;
+ _ ->
+ ok
+ end.
+
+init(_Args) ->
+ maps:fold(
+ fun
+ (Signal, _, Ret) when ?SIGNAL_HANDLED_BY_ERLANG(Signal) -> Ret;
+ (Signal, default, ok) -> os:set_signal(Signal, default);
+ (Signal, ignore, ok) -> os:set_signal(Signal, ignore);
+ (Signal, _, ok) -> os:set_signal(Signal, handle)
+ end, ok, ?SIGNALS_HANDLED_BY_US),
+ {ok, #{}}.
+
+handle_event(Signal, State) when ?SIGNAL_HANDLED_BY_ERLANG(Signal) ->
+ {ok, State};
+handle_event(Signal, State) ->
+ case ?SIGNALS_HANDLED_BY_US of
+ %% The code below can be uncommented if we introduce a signal
+ %% which should stop RabbitMQ.
+ %
+ %#{Signal := stop} ->
+ % error_logger:info_msg(
+ % "~s received - shutting down~n",
+ % [string:uppercase(atom_to_list(Signal))]),
+ % ok = init:stop();
+ _ ->
+ error_logger:info_msg(
+ "~s received - unhandled signal~n",
+ [string:uppercase(atom_to_list(Signal))])
+ end,
+ {ok, State}.
+
+handle_info(_, State) ->
+ {ok, State}.
+
+handle_call(_, State) ->
+ {ok, ok, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Args, _State) ->
+ ok.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl
new file mode 100644
index 0000000000..9fd117d9f3
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl
@@ -0,0 +1,22 @@
+-module(rabbit_prelaunch_sup).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ BootStateSup = #{id => bootstate,
+ start => {rabbit_boot_state_sup, start_link, []},
+ type => supervisor},
+    %% `rabbit_prelaunch` does not start a process; it only configures
+    %% the node.
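+    %% Its start function returns `ignore', so the supervisor keeps the
+    %% child specification without an associated process.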
+ Prelaunch = #{id => prelaunch,
+ start => {rabbit_prelaunch, run_prelaunch_first_phase, []},
+ restart => transient},
+ Procs = [BootStateSup, Prelaunch],
+ {ok, {#{strategy => one_for_one,
+ intensity => 1,
+ period => 5}, Procs}}.
diff --git a/deps/rabbit/docs/README-for-packages b/deps/rabbit/docs/README-for-packages
new file mode 100644
index 0000000000..f507a74054
--- /dev/null
+++ b/deps/rabbit/docs/README-for-packages
@@ -0,0 +1,30 @@
+This is rabbitmq-server, a message broker implementing AMQP 0-9-1, AMQP 1.0,
+STOMP and MQTT.
+
+Most of the documentation for RabbitMQ is provided on the RabbitMQ web
+site. You can see documentation for the current version at
+
+https://www.rabbitmq.com/documentation.html
+
+and for previous versions at
+
+https://www.rabbitmq.com/previous.html
+
+Man pages are installed with this package. Of particular interest are
+rabbitmqctl(8), rabbitmq-diagnostics(8) and rabbitmq-queues(8), which
+interact with a running node. rabbitmq-plugins(8) is used to manage plugins.
+All of these should be run as the superuser. Learn more about
+CLI tools at
+
+https://www.rabbitmq.com/cli.html
+
+An example configuration file is provided in the same directory as
+this README. Copy it to /etc/rabbitmq/rabbitmq.conf to use it. The
+RabbitMQ server must be restarted after changing the configuration
+file. Learn more about configuration at
+
+https://www.rabbitmq.com/configure.html
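+
+For example (the paths and the restart command below are illustrative and
+depend on your distribution's packaging and service manager):
+
+    cp rabbitmq.conf.example /etc/rabbitmq/rabbitmq.conf
+    systemctl restart rabbitmq-server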
+
+An example policy file for HA queues is provided in the same directory
+as this README. To use it with the Pacemaker OCF RA, copy it to
+/usr/local/sbin/set_rabbitmq_policy and make it executable (chmod +x).
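+
+For example (run from the directory containing the example file):
+
+    cp set_rabbitmq_policy.sh.example /usr/local/sbin/set_rabbitmq_policy
+    chmod +x /usr/local/sbin/set_rabbitmq_policy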
diff --git a/deps/rabbit/docs/README.md b/deps/rabbit/docs/README.md
new file mode 100644
index 0000000000..dba1983378
--- /dev/null
+++ b/deps/rabbit/docs/README.md
@@ -0,0 +1,35 @@
+# Manual Pages and Documentation Extras
+
+This directory contains [CLI tool](https://rabbitmq.com/cli.html) man page sources as well as a few documentation extras:
+
+ * An [annotated rabbitmq.conf example](./rabbitmq.conf.example) (see [new style configuration format](https://www.rabbitmq.com/configure.html#config-file-formats))
+ * An [annotated advanced.config example](./advanced.config.example) (see [The advanced.config file](https://www.rabbitmq.com/configure.html#advanced-config-file))
+ * A [systemd unit file example](./rabbitmq-server.service.example)
+
+Please [see rabbitmq.com](https://rabbitmq.com/documentation.html) for documentation guides.
+
+
+## Classic Config File Format Example
+
+Feeling nostalgic and looking for the [classic configuration file example](https://github.com/rabbitmq/rabbitmq-server/blob/v3.7.x/docs/rabbitmq.config.example)?
+Now that's old school! Keep in mind that the classic configuration file **should be considered deprecated**.
+Prefer `rabbitmq.conf` (see [new style configuration format](https://www.rabbitmq.com/configure.html#config-file-formats))
+with an `advanced.config` to complement it as needed.
+
+
+## man Pages
+
+### Source Files
+
+This directory contains man pages that are converted to HTML using `mandoc`:
+
+ gmake web-manpages
+
+The result is then copied to the [website repository](https://github.com/rabbitmq/rabbitmq-website/tree/live/site/man).
+
+### Contributions
+
+Since deployed man pages are generated, it is important to keep them in sync with the source.
+Accepting community contributions — which will always come as website pull requests —
+is fine, but the person who merges them is responsible for backporting all changes
+to the source pages in this repo.
diff --git a/deps/rabbit/docs/advanced.config.example b/deps/rabbit/docs/advanced.config.example
new file mode 100644
index 0000000000..dc5ab8fc0c
--- /dev/null
+++ b/deps/rabbit/docs/advanced.config.example
@@ -0,0 +1,109 @@
+[
+
+
+ %% ----------------------------------------------------------------------------
+ %% Advanced Erlang Networking/Clustering Options.
+ %%
+ %% See https://www.rabbitmq.com/clustering.html for details
+ %% ----------------------------------------------------------------------------
+ %% Sets the net_kernel tick time.
+ %% Please see http://erlang.org/doc/man/kernel_app.html and
+ %% https://www.rabbitmq.com/nettick.html for further details.
+ %%
+ %% {kernel, [{net_ticktime, 60}]},
+ %% ----------------------------------------------------------------------------
+ %% RabbitMQ Shovel Plugin
+ %%
+ %% See https://www.rabbitmq.com/shovel.html for details
+ %% ----------------------------------------------------------------------------
+
+ {rabbitmq_shovel,
+ [{shovels,
+ [%% A named shovel worker.
+ %% {my_first_shovel,
+ %% [
+
+ %% List the source broker(s) from which to consume.
+ %%
+ %% {sources,
+ %% [%% URI(s) and pre-declarations for all source broker(s).
+ %% {brokers, ["amqp://user:password@host.domain/my_vhost"]},
+ %% {declarations, []}
+ %% ]},
+
+ %% List the destination broker(s) to publish to.
+ %% {destinations,
+ %% [%% A singular version of the 'brokers' element.
+ %% {broker, "amqp://"},
+ %% {declarations, []}
+ %% ]},
+
+ %% Name of the queue to shovel messages from.
+ %%
+ %% {queue, <<"your-queue-name-goes-here">>},
+
+ %% Optional prefetch count.
+ %%
+ %% {prefetch_count, 10},
+
+ %% when to acknowledge messages:
+ %% - no_ack: never (auto)
+ %% - on_publish: after each message is republished
+ %% - on_confirm: when the destination broker confirms receipt
+ %%
+ %% {ack_mode, on_confirm},
+
+ %% Overwrite fields of the outbound basic.publish.
+ %%
+ %% {publish_fields, [{exchange, <<"my_exchange">>},
+ %% {routing_key, <<"from_shovel">>}]},
+
+ %% Static list of basic.properties to set on re-publication.
+ %%
+ %% {publish_properties, [{delivery_mode, 2}]},
+
+ %% The number of seconds to wait before attempting to
+ %% reconnect in the event of a connection failure.
+ %%
+ %% {reconnect_delay, 2.5}
+
+ %% ]} %% End of my_first_shovel
+ ]}
+ %% Rather than specifying some values per-shovel, you can specify
+ %% them for all shovels here.
+ %%
+ %% {defaults, [{prefetch_count, 0},
+ %% {ack_mode, on_confirm},
+ %% {publish_fields, []},
+ %% {publish_properties, [{delivery_mode, 2}]},
+ %% {reconnect_delay, 2.5}]}
+ ]},
+
+ {rabbitmq_auth_backend_ldap, [
+ %%
+ %% Authorisation
+ %% =============
+ %%
+
+ %% The LDAP plugin can perform a variety of queries against your
+ %% LDAP server to determine questions of authorisation. See
+ %% https://www.rabbitmq.com/ldap.html#authorisation for more
+ %% information.
+
+ %% Set the query to use when determining vhost access
+ %%
+ %% {vhost_access_query, {in_group,
+ %% "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}},
+
+ %% Set the query to use when determining resource (e.g., queue) access
+ %%
+ %% {resource_access_query, {constant, true}},
+
+ %% Set queries to determine which tags a user has
+ %%
+ %% {tag_queries, []}
+ ]}
+].
+
+
+
diff --git a/deps/rabbit/docs/rabbitmq-diagnostics.8 b/deps/rabbit/docs/rabbitmq-diagnostics.8
new file mode 100644
index 0000000000..7a6d53d097
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-diagnostics.8
@@ -0,0 +1,725 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-DIAGNOSTICS 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-diagnostics
+.Nd RabbitMQ diagnostics, monitoring and health checks tools
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Fl q
+.Op Fl s
+.Op Fl l
+.Op Fl n Ar node
+.Op Fl t Ar timeout
+.Ar command
+.Op Ar command_options
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+.Nm
+is a command line tool that provides commands used for diagnostics, monitoring
+and health checks of RabbitMQ nodes.
+See the
+.Lk https://rabbitmq.com/documentation.html "RabbitMQ documentation guides"
+to learn more about RabbitMQ diagnostics, monitoring and health checks.
+
+.Nm
+allows the operator to inspect node and cluster state. A number of
+health checks are available to be used interactively and by monitoring tools.
+
+.Pp
+By default if it is not possible to connect to and authenticate with the target node
+(for example if it is stopped), the operation will fail.
+To learn more, see the
+.Lk https://rabbitmq.com/monitoring.html "RabbitMQ Monitoring guide"
+.
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl n Ar node
+Default node is
+.Qq Pf rabbit@ Ar target-hostname ,
+where
+.Ar target-hostname
+is the local host.
+On a host named
+.Qq myserver.example.com ,
+the node name will usually be
+.Qq rabbit@myserver
+(unless
+.Ev RABBITMQ_NODENAME
+has been overridden).
+The output of
+.Qq hostname -s
+is usually the correct suffix to use after the
+.Qq @
+sign.
+See
+.Xr rabbitmq-server 8
+for details of configuring a RabbitMQ node.
+.It Fl q , -quiet
+Quiet output mode is selected.
+Informational messages are reduced when quiet mode is in effect.
+.It Fl s , -silent
+Silent output mode is selected.
+Informational messages are reduced and table headers are suppressed when silent mode is in effect.
+.It Fl t Ar timeout , Fl -timeout Ar timeout
+Operation timeout in seconds.
+Not all commands support timeouts.
+Default is
+.Cm infinity .
+.It Fl l , Fl -longnames
+Must be specified when the cluster is configured to use long (FQDN) node names.
+To learn more, see the
+.Lk https://rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Fl -erlang-cookie Ar cookie
+Shared secret to use to authenticate to the target node.
+Prefer using a local file or the
+.Ev RABBITMQ_ERLANG_COOKIE
+environment variable instead of specifying this option on the command line.
+To learn more, see the
+.Lk https://rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+.El
+.\" ------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------
+.Pp
+Most commands provided by
+.Nm
+inspect node and cluster state or perform health checks.
+.Pp
+Commands that list topology entities (e.g. queues) use tab as column delimiter.
+These commands and their arguments are delegated to rabbitmqctl(8).
+.Pp
+Some commands (
+.Cm list_queues ,
+.Cm list_exchanges ,
+.Cm list_bindings
+and
+.Cm list_consumers )
+accept an optional
+.Ar vhost
+parameter.
+.Pp
+The
+.Cm list_queues ,
+.Cm list_exchanges
+and
+.Cm list_bindings
+commands accept an optional virtual host parameter for which to display
+results.
+The default value is
+.Qq / .
+.Ss Help
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm help Oo Fl l Oc Op Ar command_name
+.Pp
+Prints usage for all available commands.
+.Bl -tag -width Ds
+.It Fl l , Fl -list-commands
+List command usages only, without parameter explanation.
+.It Ar command_name
+Prints usage for the specified command.
+.El
+.\" ------------------------------------
+.It Cm version
+.Pp
+Displays CLI tools version.
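+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics version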
+.El
+.Ss Nodes
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm wait
+.Pp
+See
+.Cm wait
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Cluster
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm cluster_status
+.Pp
+See
+.Cm cluster_status
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Users
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_users
+.Pp
+See
+.Cm list_users
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Access Control
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_permissions Op Fl p Ar vhost
+.Pp
+See
+.Cm list_permissions
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_topic_permissions Op Fl p Ar vhost
+.Pp
+See
+.Cm list_topic_permissions
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_user_permissions Ar username
+.Pp
+See
+.Cm list_user_permissions
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_user_topic_permissions Ar username
+.Pp
+See
+.Cm list_user_topic_permissions
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_vhosts Op Ar vhostinfoitem ...
+.Pp
+See
+.Cm list_vhosts
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Monitoring, observability and health checks
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm alarms
+.Pp
+Lists resource alarms, if any, in the cluster.
+.Pp
+See
+.Lk https://rabbitmq.com/alarms.html "RabbitMQ Resource Alarms guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics alarms
+.\" ------------------------------------
+.It Cm certificates
+.Pp
+Displays the node certificates for every listener on target node that is configured to use TLS.
+.Pp
+Example:
+.sp
+.Dl rabbitmq-diagnostics certificates
+.\" ------------------------------------
+.It Cm check_alarms
+.Pp
+Health check that fails (returns with a non-zero code) if there are alarms
+in effect on any of the cluster nodes.
+.Pp
+See
+.Lk https://rabbitmq.com/alarms.html "RabbitMQ Resource Alarms guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_alarms
+.\" ------------------------------------
+.It Cm check_certificate_expiration Oo Fl -unit Ar time_unit Oc Op Fl -within Ar seconds
+.Pp
+Checks the expiration date on the certificates for every listener on target node that is configured to use TLS.
+Supported time units are:
+.Bl -bullet
+.It
+days
+.It
+weeks
+.It
+months
+.It
+years
+.El
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_certificate_expiration --unit weeks --within 6
+.\" ------------------------------------
+.It Cm check_local_alarms
+.Pp
+Health check that fails (returns with a non-zero code) if there are alarms
+in effect on the target node.
+.Pp
+See
+.Lk https://rabbitmq.com/alarms.html "RabbitMQ Resource Alarms guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_local_alarms
+.\" ------------------------------------
+.It Cm check_port_connectivity
+.Pp
+Health check that fails (returns with a non-zero code) if any listener ports
+on the target node cannot accept a new TCP connection opened by
+.Nm
+.Pp
+The check only validates if a new TCP connection is accepted. It does not
+perform messaging protocol handshake or authenticate.
+.Pp
+See
+.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_port_connectivity
+.\" ------------------------------------
+.It Cm check_port_listener Ar port
+.Pp
+Health check that fails (returns with a non-zero code) if the target node
+is not listening on the specified port (there is no listener that
+uses that port).
+.Pp
+See
+.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_port_listener 5672
+.\" ------------------------------------
+.It Cm check_protocol_listener Ar protocol
+.Pp
+Health check that fails (returns with a non-zero code) if the target node
+does not have a listener for the specified protocol.
+.Pp
+See
+.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_protocol_listener mqtt
+.\" ------------------------------------
+.It Cm check_running
+.Pp
+Health check that fails (returns with a non-zero code) if the RabbitMQ
+application is not running on the target node.
+.Pp
+If
+.Xr rabbitmqctl 8
+was used to stop the application, this check will fail.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_running
+.\" ------------------------------------
+.It Cm check_virtual_hosts
+.Pp
+Health check that fails (returns with a non-zero code) if any virtual hosts
+on the target node are down.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_virtual_hosts --timeout 60
+.\" ------------------------------------
+.It Cm cipher_suites
+.Pp
+Lists cipher suites enabled by default. To list all available cipher suites, add the --all argument.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics cipher_suites --format openssl --all
+.\" ------------------------------------
+.It Cm command_line_arguments
+.Pp
+Displays target node's command-line arguments and flags as reported by the runtime.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics command_line_arguments -n rabbit@hostname
+.\" ------------------------------------
+.It Cm consume_event_stream Oo Fl -duration Ar seconds | Fl d Ar seconds Oc Oo Fl -pattern Ar pattern Oc Op Fl -timeout Ar milliseconds
+.Pp
+Streams internal events from a running node. Output is jq-compatible.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics consume_event_stream -n rabbit@hostname --duration 20 --pattern "queue_.*"
+.\" ------------------------------------
+.It Cm discover_peers
+.Pp
+Runs a peer discovery on the target node and prints the discovered nodes, if any.
+.Pp
+See
+.Lk https://rabbitmq.com/cluster-formation.html "RabbitMQ Cluster Formation guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics discover_peers --timeout 60
+.\" ------------------------------------
+.It Cm environment
+See
+.Cm environment
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm erlang_cookie_hash
+.Pp
+Outputs a hashed value of the shared secret used by the target node
+to authenticate CLI tools and peers. The value can be compared with the hash
+found in error messages of CLI tools.
+.Pp
+See
+.Lk https://rabbitmq.com/clustering.html#erlang-cookie "RabbitMQ Clustering guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics erlang_cookie_hash -q
+.\" ------------------------------------
+.It Cm erlang_version
+.Pp
+Reports target node's Erlang/OTP version.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics erlang_version -q
+.\" ------------------------------------
+.It Cm is_booting
+.Pp
+Reports if the RabbitMQ application is currently booting (that is, not yet fully
+booted and not stopped) on the target node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics is_booting
+.\" ------------------------------------
+.It Cm is_running
+.Pp
+Reports if the RabbitMQ application is fully booted and running (that is, not stopped) on
+the target node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics is_running
+.\" ------------------------------------
+.It Cm list_bindings Oo Fl p Ar vhost Oc Op Ar bindinginfoitem ...
+.Pp
+See
+.Cm list_bindings
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_channels Op Ar channelinfoitem ...
+.Pp
+See
+.Cm list_channels
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_ciphers
+.Pp
+See
+.Cm list_ciphers
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_connections Op Ar connectioninfoitem ...
+.Pp
+See
+.Cm list_connections
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_consumers Op Fl p Ar vhost
+.Pp
+See
+.Cm list_consumers
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_exchanges Oo Fl p Ar vhost Oc Op Ar exchangeinfoitem ...
+.Pp
+See
+.Cm list_exchanges
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_hashes
+.Pp
+See
+.Cm list_hashes
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_queues Oo Fl p Ar vhost Oc Oo Fl -offline | Fl -online | Fl -local Oc Op Ar queueinfoitem ...
+.Pp
+See
+.Cm list_queues
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_unresponsive_queues Oo Fl -local Oc Oo Fl -queue-timeout Ar milliseconds Oc Oo Ar column ... Oc Op Fl -no-table-headers
+.Pp
+See
+.Cm list_unresponsive_queues
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm listeners
+.Pp
+Lists listeners (bound sockets) on this node. Use this to inspect
+what protocols and ports the node is listening on for client, CLI tool
+and peer connections.
+.Pp
+See
+.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics listeners
+.\" ------------------------------------
+.It Cm log_tail Fl -number Ar number | Fl N Ar number Op Fl -timeout Ar milliseconds
+.Pp
+Prints the last N lines of the log on the node
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics log_tail --number 100
+.\" ------------------------------------
+.It Cm log_tail_stream Oo Fl -duration Ar seconds | Fl d Ar seconds Oc Op Fl -timeout Ar milliseconds
+.Pp
+Streams logs from a running node for a period of time
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics log_tail_stream --duration 60
+.\" ------------------------------------
+.It Cm maybe_stuck
+.Pp
+Periodically samples stack traces of all Erlang processes
+("lightweight threads") on the node. Reports the processes for which
+stack trace samples are identical.
+.Pp
+Identical samples may indicate that the process is not making any progress,
+but are not necessarily an indication of a problem.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics maybe_stuck -q
+.\" ------------------------------------
+.It Cm memory_breakdown Op Fl -unit Ar memory_unit
+.Pp
+Displays node's memory usage by category.
+Supported memory units are:
+.Bl -bullet
+.It
+bytes
+.It
+megabytes
+.It
+gigabytes
+.It
+terabytes
+.El
+.Pp
+See
+.Lk https://rabbitmq.com/memory-use.html "RabbitMQ Memory Use guide"
+to learn more.
+.Pp
+Example:
+.sp
+.Dl rabbitmq-diagnostics memory_breakdown --unit gigabytes
+.\" ------------------------------------
+.It Cm observer Op Fl -interval Ar seconds
+.Pp
+Starts a CLI observer interface on the target node
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics observer --interval 10
+.\" ------------------------------------
+.It Cm ping
+.Pp
+Most basic health check. Succeeds if the target node (runtime) is running
+and
+.Nm
+can authenticate with it successfully.
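+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics ping -n rabbit@hostname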
+.\" ------------------------------------
+.It Cm report
+.Pp
+See
+.Cm report
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm runtime_thread_stats Op Fl -sample-interval Ar interval
+.Pp
+Performs sampling of runtime (kernel) threads' activity for
+.Ar interval
+seconds and reports it.
+.Pp
+For this command to work, Erlang/OTP on the target node must be compiled with
+microstate accounting support and have the runtime_tools package available.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics runtime_thread_stats --sample-interval 15
+.\" ------------------------------------
+.It Cm schema_info Oo Fl -no_table_headers Oc Oo Ar column ... Oc Op Fl -timeout Ar milliseconds
+.Pp
+See
+.Cm schema_info
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm server_version
+.Pp
+Reports target node's version.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics server_version -q
+.\" ------------------------------------
+.It Cm status
+.Pp
+See
+.Cm status
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm tls_versions
+.Pp
+Lists all TLS versions supported by the runtime on the target node.
+Note that RabbitMQ can be configured to accept only a subset of those
+versions; for example, SSLv3 is disabled by default.
+.Pp
+See
+.Lk https://rabbitmq.com/ssl.html "RabbitMQ TLS guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics tls_versions -q
+.El
+.Ss Parameters
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_global_parameters
+.Pp
+See
+.Cm list_global_parameters
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_parameters Op Fl p Ar vhost
+.Pp
+See
+.Cm list_parameters
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Policies
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_operator_policies Op Fl p Ar vhost
+.Pp
+See
+.Cm list_operator_policies
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_policies Op Fl p Ar vhost
+.Pp
+See
+.Cm list_policies
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.El
+.Ss Virtual hosts
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_vhost_limits Oo Fl -vhost Ar vhost Oc Oo Fl -global Oc Op Fl -no-table-headers
+.Pp
+See
+.Cm list_vhost_limits
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Node configuration
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm log_location Oo Fl -all | Fl a Oc Op Fl -timeout Ar milliseconds
+.Pp
+Shows log file location(s) on target node
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics log_location -a
+.El
+.Ss Feature flags
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_feature_flags Oo Ar column ... Oc Op Fl -timeout Ar milliseconds
+.Pp
+See
+.Cm list_feature_flags
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Queues
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm quorum_status Ar queue Op Fl -vhost Ar vhost
+.Pp
+See
+.Cm quorum_status
+in
+.Xr rabbitmq-queues 8
+.It Cm check_if_node_is_mirror_sync_critical
+.Pp
+See
+.Cm check_if_node_is_mirror_sync_critical
+in
+.Xr rabbitmq-queues 8
+.It Cm check_if_node_is_quorum_critical
+.Pp
+See
+.Cm check_if_node_is_quorum_critical
+in
+.Xr rabbitmq-queues 8
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-echopid.8 b/deps/rabbit/docs/rabbitmq-echopid.8
new file mode 100644
index 0000000000..f51dab854c
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-echopid.8
@@ -0,0 +1,70 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-ECHOPID.BAT 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-echopid.bat
+.Nd returns the Windows process id of the Erlang runtime running RabbitMQ
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Ar sname
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+RabbitMQ is an open source multi-protocol messaging broker.
+.Pp
+Running
+.Nm
+will attempt to discover and echo the process id (PID) of the Erlang
+runtime process
+.Pq Pa erl.exe
+that is hosting RabbitMQ.
+To allow
+.Pa erl.exe
+time to start up and load RabbitMQ, the script will wait for ten seconds
+before timing out if a suitable PID cannot be found.
+.Pp
+If a PID is discovered, the script will echo it to stdout
+before exiting with a
+.Ev ERRORLEVEL
+of 0.
+If no PID is discovered before the timeout, nothing is written to stdout
+and the script exits setting
+.Ev ERRORLEVEL
+to 1.
+.Pp
+Note that this script only exists on Windows due to the need to wait for
+.Pa erl.exe
+and possibly time out.
+To obtain the PID on Unix set
+.Ev RABBITMQ_PID_FILE
+before starting
+.Xr rabbitmq-server 8
+and do not use
+.Fl detached .
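+.Pp
+For example, on Unix (the pid file location below is illustrative):
+.sp
+.Dl RABBITMQ_PID_FILE=/var/run/rabbitmq/pid rabbitmq-server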
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Ar sname
+The short-name form of the RabbitMQ node name.
+.El
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmq-plugins 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmqctl 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-env.conf.5 b/deps/rabbit/docs/rabbitmq-env.conf.5
new file mode 100644
index 0000000000..b1bb26281b
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-env.conf.5
@@ -0,0 +1,86 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-ENV.CONF 5
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-env.conf
+.Nd environment variables used by RabbitMQ server
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+.Nm
+contains environment variables that override the defaults built in to the
+RabbitMQ scripts and CLI tools.
+.Pp
+The file is interpreted by the system shell, and so should consist of a
+sequence of shell environment variable definitions.
+Normal shell syntax is permitted (since the file is sourced using the
+shell "." operator), including line comments starting with "#".
+.Pp
+In order of preference, the startup scripts get their values from the
+environment, from
+.Nm
+and finally from the built-in default values.
+For example, for the
+.Ev RABBITMQ_NODENAME
+setting,
+.Ev RABBITMQ_NODENAME
+from the environment is checked first.
+If it is absent or equal to the empty string, then
+.Ev NODENAME
+from
+.Nm
+is checked.
+If it is also absent or set equal to the empty string then the default
+value from the startup script is used.
+.Pp
+The variable names in
+.Nm
+are always equal to the environment variable names, with the
+.Qq RABBITMQ_
+prefix removed:
+.Ev RABBITMQ_NODE_PORT
+from the environment becomes
+.Ev NODE_PORT
+in
+.Nm .
+.\" ------------------------------------------------------------------
+.Sh EXAMPLES
+.\" ------------------------------------------------------------------
+Below is an example of a minimalistic
+.Nm
+file that overrides the default node name prefix from "rabbit" to
+"hare".
+.sp
+.Dl # I am a complete rabbitmq-env.conf file.
+.Dl # Comment lines start with a hash character.
+.Dl # This is a /bin/sh script file - use ordinary environment variable syntax
+.Dl NODENAME=hare
+
+The
+.Nm
+file below changes the RabbitMQ configuration file location to
+"/data/services/rabbitmq/rabbitmq.conf".
+.sp
+.Dl # I am a complete rabbitmq-env.conf file.
+.Dl # Comment lines start with a hash character.
+.Dl # This is a /bin/sh script file - use ordinary environment variable syntax
+.Dl CONFIG_FILE=/data/services/rabbitmq/rabbitmq.conf
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmq-echopid 8 ,
+.Xr rabbitmq-plugins 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmqctl 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-plugins.8 b/deps/rabbit/docs/rabbitmq-plugins.8
new file mode 100644
index 0000000000..4cec8cfded
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-plugins.8
@@ -0,0 +1,254 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-PLUGINS 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-plugins
+.Nd command line tool for managing RabbitMQ plugins
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Fl q
+.Op Fl s
+.Op Fl l
+.Op Fl n Ar node
+.Op Fl t Ar timeout
+.Ar command
+.Op Ar command_options
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+.Nm
+is a command line tool for managing RabbitMQ plugins.
+See the
+.Lk https://www.rabbitmq.com/plugins.html "RabbitMQ Plugins guide"
+for an overview of RabbitMQ plugins and how they are used.
+
+.Nm
+allows the operator to enable, disable and inspect plugins.
+It must be run by a user with write permissions to the RabbitMQ
+configuration directory.
+.Pp
+Plugins can depend on other plugins.
+.Nm
+resolves the dependencies and enables or disables all dependencies
+so that the user doesn't have to manage them explicitly.
+Plugins listed on the
+.Nm
+command line are marked as explicitly enabled; dependent plugins are
+marked as implicitly enabled.
+Implicitly enabled plugins are automatically disabled again when they
+are no longer required.
+.Pp
+The
+.Cm enable ,
+.Cm disable ,
+and
+.Cm set
+commands will update the plugins file and then attempt to connect to the
+broker and ensure it is running all enabled plugins.
+By default if it is not possible to connect to and authenticate with the target node
+(for example if it is stopped), the operation will fail.
+If
+.Nm
+is used on the same host as the target node,
+.Fl -offline
+can be specified to make
+.Nm
+resolve and update plugin state directly (without contacting the node).
+Such changes will only have an effect on next node start.
+To learn more, see the
+.Lk https://www.rabbitmq.com/plugins.html "RabbitMQ Plugins guide"
+.
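+.Pp
+For example, the following enables a plugin without contacting the node;
+the change takes effect on next node start:
+.sp
+.Dl rabbitmq-plugins enable rabbitmq_management --offline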
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl n Ar node
+Default node is
+.Qq Pf rabbit@ Ar target-hostname ,
+where
+.Ar target-hostname
+is the local host.
+On a host named
+.Qq myserver.example.com ,
+the node name will usually be
+.Qq rabbit@myserver
+(unless
+.Ev RABBITMQ_NODENAME
+has been overridden).
+The output of
+.Qq hostname -s
+is usually the correct suffix to use after the
+.Qq @
+sign.
+See
+.Xr rabbitmq-server 8
+for details of configuring a RabbitMQ node.
+.It Fl q , -quiet
+Quiet output mode is selected.
+Informational messages are reduced when quiet mode is in effect.
+.It Fl s , -silent
+Silent output mode is selected.
+Informational messages are reduced and table headers are suppressed when silent mode is in effect.
+.It Fl t Ar timeout , Fl -timeout Ar timeout
+Operation timeout in seconds.
+Not all commands support timeouts.
+Default is
+.Cm infinity .
+.It Fl l , Fl -longnames
+Must be specified when the cluster is configured to use long (FQDN) node names.
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Fl -erlang-cookie Ar cookie
+Shared secret to use to authenticate to the target node.
+Prefer using a local file or the
+.Ev RABBITMQ_ERLANG_COOKIE
+environment variable instead of specifying this option on the command line.
+To learn more, see the
+.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+.El
+.\" ------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list Oo Fl Eemv Oc Op Ar pattern
+.Bl -tag -width Ds
+.It Fl E
+Show only explicitly enabled plugins.
+.It Fl e
+Show only explicitly or implicitly enabled plugins.
+.It Fl m
+Show only plugin names (minimal).
+.It Fl v
+Show all plugin details (verbose).
+.It Ar pattern
+Pattern to filter the plugin names by.
+.El
+.Pp
+Lists all plugins, their versions, dependencies and descriptions.
+Each plugin is prefixed with two status indicator characters inside [ ].
+The first indicator can be:
+.Bl -tag -width <space> -compact
+.It Sy <space>
+to indicate that the plugin is not enabled
+.It Sy E
+to indicate that it is explicitly enabled
+.It Sy e
+to indicate that it is implicitly enabled
+.It Sy \!
+to indicate that it is enabled but missing and thus not operational
+.El
+.Pp
+The second indicator can be:
+.Bl -tag -width <space> -compact
+.It Sy <space>
+to show that the plugin is not running
+.It Sy *
+to show that it is
+.El
+.Pp
+If the optional pattern is given, only plugins whose name matches
+.Ar pattern
+are shown.
+.Pp
+For example, this command lists all plugins, on one line each:
+.sp
+.Dl rabbitmq-plugins list
+.Pp
+This command lists all plugins in detail (verbose mode):
+.sp
+.Dl rabbitmq-plugins list -v
+.Pp
+This command lists all plugins whose name contains "management".
+.sp
+.Dl rabbitmq-plugins list -v management
+.Pp
+This command lists all implicitly or explicitly enabled plugins whose name matches "rabbit":
+.sp
+.Dl rabbitmq-plugins list -e rabbit
+.\" ------------------------------------
+.It Cm enable Oo Fl -offline Oc Oo Fl -online Oc Ar plugin ...
+.Bl -tag -width Ds
+.It Fl -offline
+Modify node's enabled plugin state directly without contacting the node.
+.It Fl -online
+Treat a failure to connect to the running broker as fatal.
+.It Ar plugin
+One or more plugins to enable.
+.El
+.Pp
+Enables the specified plugins and all their dependencies.
+.Pp
+For example, this command enables the
+.Qq shovel
+and
+.Qq management
+plugins and all their dependencies:
+.sp
+.Dl rabbitmq\-plugins enable rabbitmq_shovel rabbitmq_management
+.\" ------------------------------------
+.It Cm disable Oo Fl -offline Oc Oo Fl -online Oc Ar plugin ...
+.Bl -tag -width Ds
+.It Fl -offline
+Modify node's enabled plugin state directly without contacting the node.
+.It Fl -online
+Treat a failure to connect to the running broker as fatal.
+.It Ar plugin
+One or more plugins to disable.
+.El
+.Pp
+Disables the specified plugins and all their dependencies.
+.Pp
+For example, this command disables
+.Qq rabbitmq_management
+and all plugins that depend on it:
+.sp
+.Dl rabbitmq-plugins disable rabbitmq_management
+.\" ------------------------------------
+.It Cm set Oo Fl -offline Oc Oo Fl -online Oc Op Ar plugin ...
+.Bl -tag -width Ds
+.It Fl -offline
+Modify node's enabled plugin state directly without contacting the node.
+.It Fl -online
+Treat a failure to connect to the running broker as fatal.
+.It Ar plugin
+Zero or more plugins to enable.
+.El
+.Pp
+Enables the specified plugins and all their dependencies.
+Unlike
+.Cm enable ,
+this command ignores and overwrites any existing enabled plugins.
+.Cm set
+with no plugin arguments is a legal command meaning "disable all plugins".
+.Pp
+For example, this command enables the
+.Qq management
+plugin and its dependencies and disables everything else:
+.sp
+.Dl rabbitmq-plugins set rabbitmq_management
+.El
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-queues.8 b/deps/rabbit/docs/rabbitmq-queues.8
new file mode 100644
index 0000000000..a0bc41a19c
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-queues.8
@@ -0,0 +1,202 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-QUEUES 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-queues
+.Nd RabbitMQ queue management tools
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Fl q
+.Op Fl s
+.Op Fl l
+.Op Fl n Ar node
+.Op Fl t Ar timeout
+.Ar command
+.Op Ar command_options
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+.Nm
+is a command line tool that provides commands used to manage queues,
+mainly member handling for quorum queues.
+See the
+.Lk https://www.rabbitmq.com/quorum-queues.html "RabbitMQ quorum queues guide"
+and
+.Lk https://www.rabbitmq.com/ha.html "RabbitMQ classic mirrored queues guide"
+to learn more about queue types in RabbitMQ.
+.
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl n Ar node
+Default node is
+.Qq Pf rabbit@ Ar target-hostname ,
+where
+.Ar target-hostname
+is the local host.
+On a host named
+.Qq myserver.example.com ,
+the node name will usually be
+.Qq rabbit@myserver
+(unless
+.Ev RABBITMQ_NODENAME
+has been overridden).
+The output of
+.Qq hostname -s
+is usually the correct suffix to use after the
+.Qq @
+sign.
+See
+.Xr rabbitmq-server 8
+for details of configuring a RabbitMQ node.
+.It Fl q , -quiet
+Quiet output mode is selected.
+Informational messages are reduced when quiet mode is in effect.
+.It Fl s , -silent
+Silent output mode is selected.
+Informational messages are reduced and table headers are suppressed when silent mode is in effect.
+.It Fl t Ar timeout , Fl -timeout Ar timeout
+Operation timeout in seconds.
+Not all commands support timeouts.
+Default is
+.Cm infinity .
+.It Fl l , Fl -longnames
+Must be specified when the cluster is configured to use long (FQDN) node names.
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Fl -erlang-cookie Ar cookie
+Shared secret to use to authenticate to the target node.
+Prefer using a local file or the
+.Ev RABBITMQ_ERLANG_COOKIE
+environment variable instead of specifying this option on the command line.
+To learn more, see the
+.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+.El
+.\" ------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm help
+.Pp
+Displays general help and commands supported by
+.Nm .
+.El
+.Ss Cluster
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm grow Ar node Ar selector Fl -vhost-pattern Ar pattern Fl -queue-pattern Ar pattern Fl -errors-only
+.Pp
+Adds a new replica on the given node for all or half of the matching quorum queues.
+.Pp
+Supported
+.Ar selector
+values are:
+.Bl -tag -width Ds
+.It Dv Sy all
+Selects all quorum queues
+.It Dv Sy even
+Selects quorum queues with an even number of replicas
+.El
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues grow Qo rabbit@newhost Qc Qo all Qc --vhost-pattern Qo a-vhost Qc --queue-pattern Qo .* Qc
+.\" ------------------------------------
+.It Cm rebalance Ar type Fl -vhost-pattern Ar pattern Fl -queue-pattern Ar pattern
+.Pp
+Rebalances queue master replicas across cluster nodes.
+.Pp
+Supported
+.Ar type
+values are:
+.Bl -tag -width Ds
+.It Dv Sy all
+All queue types
+.It Dv Sy quorum
+Only quorum queues
+.It Dv Sy classic
+Only classic queues
+.El
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues rebalance Qo all Qc --vhost-pattern Qo a-vhost Qc --queue-pattern Qo .* Qc
+.\" ------------------------------------
+.It Cm shrink Ar node
+.Pp
+Shrinks quorum queue clusters by removing any members (replicas) on the given node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues shrink Qo rabbit@decommissioned-node Qc
+.\" ------------------------------------
+.El
+.Ss Replication
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm add_member Ar queue Ar node Fl -vhost Ar virtual-host
+.Pp
+Adds a quorum queue member (replica) on the given node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues add_member --vhost Qo a-vhost Qc Qo a-queue Qc Qo rabbit@new-node Qc
+.\" ------------------------------------
+.It Cm delete_member Ar queue Ar node Fl -vhost Ar virtual-host
+.Pp
+Removes a quorum queue member (replica) on the given node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues delete_member --vhost Qo a-vhost Qc Qo a-queue Qc Qo rabbit@decommissioned-node Qc
+.\" ------------------------------------
+.El
+.Ss Queues
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm quorum_status Ar queue Fl -vhost Ar virtual-host
+.Pp
+Displays quorum status of a quorum queue.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues quorum_status --vhost Qo a-vhost Qc Qo a-queue Qc
+.It Cm check_if_node_is_mirror_sync_critical
+.Pp
+Health check that exits with a non-zero code if there are classic mirrored queues without online synchronised mirrors (queues that would potentially lose data if the target node is shut down).
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues check_if_node_is_mirror_sync_critical
+.It Cm check_if_node_is_quorum_critical
+.Pp
+Health check that exits with a non-zero code if there are queues with minimum online quorum (queues that would lose their quorum if the target node is shut down).
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues check_if_node_is_quorum_critical
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-server.8 b/deps/rabbit/docs/rabbitmq-server.8
new file mode 100644
index 0000000000..6a5e411cb3
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-server.8
@@ -0,0 +1,98 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-SERVER 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-server
+.Nd starts a RabbitMQ node
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Fl detached
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+RabbitMQ is an open source multi-protocol messaging broker.
+.Pp
+Running
+.Nm
+starts a RabbitMQ node in the foreground. The node will display a startup
+banner and report when startup is complete.
+To shut down the server, use service management tools or
+.Xr rabbitmqctl 8 .
+.\" ------------------------------------------------------------------
+.Sh ENVIRONMENT
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Ev RABBITMQ_CONFIG_FILE
+Defaults to
+.Pa /etc/rabbitmq/rabbitmq.conf .
+Node configuration file path.
+To learn more, see the
+.Lk https://www.rabbitmq.com/configure.html "RabbitMQ Configuration guide"
+.It Ev RABBITMQ_MNESIA_BASE
+Defaults to
+.Pa /var/lib/rabbitmq/mnesia .
+Node data directory will be located (or created) in this directory.
+To learn more, see the
+.Lk https://www.rabbitmq.com/relocate.html "RabbitMQ File and Directory guide"
+.It Ev RABBITMQ_LOG_BASE
+Defaults to
+.Pa /var/log/rabbitmq .
+Log files generated by the server will be placed in this directory.
+To learn more, see the
+.Lk https://www.rabbitmq.com/logging.html "RabbitMQ Logging guide"
+.It Ev RABBITMQ_NODENAME
+Defaults to
+.Qq rabbit@
+followed by the computed hostname.
+Can be used to run multiple nodes on the same host.
+Every node in a cluster must have a unique
+.Ev RABBITMQ_NODENAME .
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
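+.Pp
+For example, this starts a node named
+.Qq rabbit2
+(the computed hostname is appended automatically):
+.sp
+.Dl RABBITMQ_NODENAME=rabbit2 rabbitmq-server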
+.It Ev RABBITMQ_NODE_IP_ADDRESS
+By default RabbitMQ will bind to all IPv6 and IPv4 interfaces available.
+This variable limits the node to one network interface or address
+family.
+To learn more, see the
+.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide"
+.It Ev RABBITMQ_NODE_PORT
+AMQP 0-9-1 and AMQP 1.0 port. Defaults to 5672.
+To learn more, see the
+.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide"
+.El
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl detached
+Start the server process in the background.
+Note that this will cause the pid not to be written to the pid file.
+.Pp
+For example, this command runs a RabbitMQ node in the background:
+.sp
+.Dl rabbitmq-server -detached
+.El
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-plugins 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-server.service.example b/deps/rabbit/docs/rabbitmq-server.service.example
new file mode 100644
index 0000000000..dec70eb635
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-server.service.example
@@ -0,0 +1,27 @@
+# systemd unit example
+[Unit]
+Description=RabbitMQ broker
+After=network.target epmd@0.0.0.0.socket
+Wants=network.target epmd@0.0.0.0.socket
+
+[Service]
+Type=notify
+User=rabbitmq
+Group=rabbitmq
+NotifyAccess=all
+TimeoutStartSec=3600
+# Note:
+# You *may* wish to add the following to automatically restart RabbitMQ
+# in the event of a failure. systemd service restarts are not a
+# replacement for service monitoring. Please see
+# https://www.rabbitmq.com/monitoring.html
+#
+# Restart=on-failure
+# RestartSec=10
+WorkingDirectory=/var/lib/rabbitmq
+ExecStart=/usr/lib/rabbitmq/bin/rabbitmq-server
+ExecStop=/usr/lib/rabbitmq/bin/rabbitmqctl stop
+ExecStop=/bin/sh -c "while ps -p $MAINPID >/dev/null 2>&1; do sleep 1; done"
+
+[Install]
+WantedBy=multi-user.target
diff --git a/deps/rabbit/docs/rabbitmq-service.8 b/deps/rabbit/docs/rabbitmq-service.8
new file mode 100644
index 0000000000..154388fcfc
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-service.8
@@ -0,0 +1,152 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-SERVICE.BAT 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-service.bat
+.Nd tool for managing RabbitMQ Windows service
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Ar command
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+RabbitMQ is an open source multi-protocol messaging broker.
+.Pp
+Running
+.Nm
+allows the RabbitMQ broker to be run as a service in
+Windows® environments.
+The RabbitMQ broker service can be started and stopped using the
+Windows® services panel.
+.Pp
+By default the service will run in the authentication context of the
+local system account.
+It is therefore necessary to synchronise Erlang cookies between the
+local system account (typically
+.Pa C:\(rsWindows\(rs.erlang.cookie )
+and the account that will be used to run
+.Xr rabbitmqctl 8 .
+.\" ------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Cm help
+Display usage information.
+.It Cm install
+Install the service.
+The service will not be started.
+Subsequent invocations will update the service parameters if relevant
+environment variables were modified.
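+.Pp
+Example:
+.sp
+.Dl rabbitmq-service.bat install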
+.It Cm remove
+Remove the service.
+If the service is running then it will automatically be stopped before
+being removed.
+No files will be deleted as a consequence and
+.Xr rabbitmq-server 8
+will remain operable.
+.It Cm start
+Start the service.
+The service must have been correctly installed beforehand.
+.It Cm stop
+Stop the service.
+The service must be running for this command to have any effect.
+.It Cm disable
+Disable the service.
+This is the equivalent of setting the startup type to
+.Sy Disabled
+using the service control panel.
+.It Cm enable
+Enable the service.
+This is the equivalent of setting the startup type to
+.Sy Automatic
+using the service control panel.
+.El
+.\" ------------------------------------------------------------------
+.Sh ENVIRONMENT
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Ev RABBITMQ_SERVICENAME
+Defaults to RabbitMQ.
+.It Ev RABBITMQ_BASE
+Note: Windows only. Defaults to the application data directory of the
+current user. This is the location of log and database directories.
+.It Ev RABBITMQ_NODENAME
+Defaults to
+.Qq rabbit@
+followed by the computed hostname.
+Can be used to run multiple nodes on the same host.
+Every node in a cluster must have a unique
+.Ev RABBITMQ_NODENAME .
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Ev RABBITMQ_NODE_IP_ADDRESS
+By default RabbitMQ will bind to all IPv6 and IPv4 interfaces available.
+This variable limits the node to one network interface or address
+family.
+To learn more, see the
+.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide"
+.It Ev RABBITMQ_NODE_PORT
+AMQP 0-9-1 and AMQP 1.0 port. Defaults to 5672.
+To learn more, see the
+.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide"
+.It Ev ERLANG_SERVICE_MANAGER_PATH
+Defaults to
+.Pa C:\(rsProgram\ Files\(rserl{version}\(rserts-{version}\(rsbin
+(or
+.Pa C:\(rsProgram\ Files\ (x86)\(rserl{version}\(rserts-{version}\(rsbin
+for 64-bit environments).
+This is the installation location of the Erlang service manager.
+.It Ev RABBITMQ_CONSOLE_LOG
+Set this variable to
+.Sy new
+or
+.Sy reuse
+to have the console output from the server redirected to a file named
+.Pa SERVICENAME.debug
+in the application data directory of the user that installed the
+service.
+Under Vista this will be
+.Pa C:\(rsUsers\(rsusername\(rsAppData\(rsRoaming\(rsSERVICENAME .
+Under previous versions of Windows this will be
+.Pa C:\(rsDocuments and Settings\(rsusername\(rsApplication Data\(rsSERVICENAME .
+If
+.Ev RABBITMQ_CONSOLE_LOG
+is set to
+.Sy new
+then a new file will be created each time the service starts.
+If
+.Ev RABBITMQ_CONSOLE_LOG
+is set to
+.Sy reuse
+then the file will be overwritten each time the service starts.
+The default behaviour when
+.Ev RABBITMQ_CONSOLE_LOG
+is not set or set to a value other than
+.Sy new
+or
+.Sy reuse
+is to discard the server output.
+.El
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-plugins 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-upgrade.8 b/deps/rabbit/docs/rabbitmq-upgrade.8
new file mode 100644
index 0000000000..4fe7283f13
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-upgrade.8
@@ -0,0 +1,108 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-UPGRADE 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-upgrade
+.Nd RabbitMQ installation upgrade tools
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Fl q
+.Op Fl s
+.Op Fl l
+.Op Fl n Ar node
+.Op Fl t Ar timeout
+.Ar command
+.Op Ar command_options
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+.Nm
+is a command line tool that provides commands used during the upgrade of RabbitMQ nodes.
+See the
+.Lk https://www.rabbitmq.com/upgrade.html "RabbitMQ upgrade guide"
+to learn more about RabbitMQ installation upgrades.
+.
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl n Ar node
+Default node is
+.Qq Pf rabbit@ Ar target-hostname ,
+where
+.Ar target-hostname
+is the local host.
+On a host named
+.Qq myserver.example.com ,
+the node name will usually be
+.Qq rabbit@myserver
+(unless
+.Ev RABBITMQ_NODENAME
+has been overridden).
+The output of
+.Qq hostname -s
+is usually the correct suffix to use after the
+.Qq @
+sign.
+See
+.Xr rabbitmq-server 8
+for details of configuring a RabbitMQ node.
+.It Fl q , -quiet
+Quiet output mode is selected.
+Informational messages are reduced when quiet mode is in effect.
+.It Fl s , -silent
+Silent output mode is selected.
+Informational messages are reduced and table headers are suppressed when silent mode is in effect.
+.It Fl t Ar timeout , Fl -timeout Ar timeout
+Operation timeout in seconds.
+Not all commands support timeouts.
+Default is
+.Cm infinity .
+.It Fl l , Fl -longnames
+Must be specified when the cluster is configured to use long (FQDN) node names.
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Fl -erlang-cookie Ar cookie
+Shared secret to use to authenticate to the target node.
+Prefer using a local file or the
+.Ev RABBITMQ_ERLANG_COOKIE
+environment variable instead of specifying this option on the command line.
+To learn more, see the
+.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+.El
+.\" ------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm help
+.Pp
+Displays general help and commands supported by
+.Nm .
+.\" ------------------------------------
+.It Cm post_upgrade
+.Pp
+Runs post-upgrade tasks. In the current version, it rebalances mirrored and quorum queues across all nodes in the cluster.
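+.Pp
+For example, to run the post-upgrade tasks against the default local node:
+.sp
+.Dl rabbitmq-upgrade post_upgrade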
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq.conf.example b/deps/rabbit/docs/rabbitmq.conf.example
new file mode 100644
index 0000000000..17e023e62c
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq.conf.example
@@ -0,0 +1,1002 @@
+# ======================================
+# RabbitMQ broker section
+# ======================================
+
+## Related doc guide: https://rabbitmq.com/configure.html. See
+## https://rabbitmq.com/documentation.html for documentation ToC.
+
+## Networking
+## ====================
+##
+## Related doc guide: https://rabbitmq.com/networking.html.
+##
+## By default, RabbitMQ will listen on all interfaces, using
+## the standard (reserved) AMQP 0-9-1 and 1.0 port.
+##
+# listeners.tcp.default = 5672
+
+
+## To listen on a specific interface, provide an IP address with port.
+## For example, to listen only on localhost for both IPv4 and IPv6:
+##
+# IPv4
+# listeners.tcp.local = 127.0.0.1:5672
+# IPv6
+# listeners.tcp.local_v6 = ::1:5672
+
+## You can define multiple listeners using listener names
+# listeners.tcp.other_port = 5673
+# listeners.tcp.other_ip = 10.10.10.10:5672
+
+
+## TLS listeners are configured in the same fashion as TCP listeners,
+## including the option to control the choice of interface.
+##
+# listeners.ssl.default = 5671
+
+## It is possible to disable regular TCP (non-TLS) listeners. Clients
+## not configured to use TLS and the correct TLS-enabled port won't be able
+## to connect to this node.
+# listeners.tcp = none
+
+## Number of Erlang processes that will accept connections for the TCP
+## and TLS listeners.
+##
+# num_acceptors.tcp = 10
+# num_acceptors.ssl = 10
+
+## Socket writer will force GC every so many bytes transferred.
+## Default is 1 GB (`1000000000` bytes). Set to 'off' to disable.
+##
+# socket_writer.gc_threshold = 1000000000
+#
+## To disable:
+# socket_writer.gc_threshold = off
+
+## Maximum amount of time allowed for the AMQP 0-9-1 and AMQP 1.0 handshake
+## (performed after socket connection and TLS handshake) to complete, in milliseconds.
+##
+# handshake_timeout = 10000
+
+## Set to 'true' to perform reverse DNS lookups when accepting a
+## connection. rabbitmqctl and management UI will then display hostnames
+## instead of IP addresses. Default value is `false`.
+##
+# reverse_dns_lookups = false
+
+##
+## Security, Access Control
+## ==============
+##
+
+## Related doc guide: https://rabbitmq.com/access-control.html.
+
+## The default "guest" user is only permitted to access the server
+## via a loopback interface (e.g. localhost).
+## {loopback_users, [<<"guest">>]},
+##
+# loopback_users.guest = true
+
+## Uncomment the following line if you want to allow access to the
+## guest user from anywhere on the network.
+# loopback_users.guest = false
+
+## TLS configuration.
+##
+## Related doc guide: https://rabbitmq.com/ssl.html.
+##
+# ssl_options.verify = verify_peer
+# ssl_options.fail_if_no_peer_cert = false
+# ssl_options.cacertfile = /path/to/cacert.pem
+# ssl_options.certfile = /path/to/cert.pem
+# ssl_options.keyfile = /path/to/key.pem
+#
+# ssl_options.honor_cipher_order = true
+# ssl_options.honor_ecc_order = true
+
+# ssl_options.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+# ssl_options.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
+# ssl_options.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
+# ssl_options.ciphers.4 = ECDHE-RSA-AES256-SHA384
+# ssl_options.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
+# ssl_options.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
+# ssl_options.ciphers.7 = ECDH-ECDSA-AES256-SHA384
+# ssl_options.ciphers.8 = ECDH-RSA-AES256-SHA384
+# ssl_options.ciphers.9 = DHE-RSA-AES256-GCM-SHA384
+# ssl_options.ciphers.10 = DHE-DSS-AES256-GCM-SHA384
+# ssl_options.ciphers.11 = DHE-RSA-AES256-SHA256
+# ssl_options.ciphers.12 = DHE-DSS-AES256-SHA256
+# ssl_options.ciphers.13 = ECDHE-ECDSA-AES128-GCM-SHA256
+# ssl_options.ciphers.14 = ECDHE-RSA-AES128-GCM-SHA256
+# ssl_options.ciphers.15 = ECDHE-ECDSA-AES128-SHA256
+# ssl_options.ciphers.16 = ECDHE-RSA-AES128-SHA256
+# ssl_options.ciphers.17 = ECDH-ECDSA-AES128-GCM-SHA256
+# ssl_options.ciphers.18 = ECDH-RSA-AES128-GCM-SHA256
+# ssl_options.ciphers.19 = ECDH-ECDSA-AES128-SHA256
+# ssl_options.ciphers.20 = ECDH-RSA-AES128-SHA256
+# ssl_options.ciphers.21 = DHE-RSA-AES128-GCM-SHA256
+# ssl_options.ciphers.22 = DHE-DSS-AES128-GCM-SHA256
+# ssl_options.ciphers.23 = DHE-RSA-AES128-SHA256
+# ssl_options.ciphers.24 = DHE-DSS-AES128-SHA256
+# ssl_options.ciphers.25 = ECDHE-ECDSA-AES256-SHA
+# ssl_options.ciphers.26 = ECDHE-RSA-AES256-SHA
+# ssl_options.ciphers.27 = DHE-RSA-AES256-SHA
+# ssl_options.ciphers.28 = DHE-DSS-AES256-SHA
+# ssl_options.ciphers.29 = ECDH-ECDSA-AES256-SHA
+# ssl_options.ciphers.30 = ECDH-RSA-AES256-SHA
+# ssl_options.ciphers.31 = ECDHE-ECDSA-AES128-SHA
+# ssl_options.ciphers.32 = ECDHE-RSA-AES128-SHA
+# ssl_options.ciphers.33 = DHE-RSA-AES128-SHA
+# ssl_options.ciphers.34 = DHE-DSS-AES128-SHA
+# ssl_options.ciphers.35 = ECDH-ECDSA-AES128-SHA
+# ssl_options.ciphers.36 = ECDH-RSA-AES128-SHA
+
+## Select an authentication/authorisation backend to use.
+##
+## Alternative backends are provided by plugins, such as rabbitmq-auth-backend-ldap.
+##
+## NB: These settings require certain plugins to be enabled.
+##
+## Related doc guides:
+##
+## * https://rabbitmq.com/plugins.html
+## * https://rabbitmq.com/access-control.html
+##
+
+# auth_backends.1 = rabbit_auth_backend_internal
+
+## uses separate backends for authentication and authorisation,
+## see below.
+# auth_backends.1.authn = rabbit_auth_backend_ldap
+# auth_backends.1.authz = rabbit_auth_backend_internal
+
+## The rabbitmq_auth_backend_ldap plugin allows the broker to
+## perform authentication and authorisation by deferring to an
+## external LDAP server.
+##
+## Relevant doc guides:
+##
+## * https://rabbitmq.com/ldap.html
+## * https://rabbitmq.com/access-control.html
+##
+## uses LDAP for both authentication and authorisation
+# auth_backends.1 = rabbit_auth_backend_ldap
+
+## uses HTTP service for both authentication and
+## authorisation
+# auth_backends.1 = rabbit_auth_backend_http
+
+## uses two backends in a chain: HTTP first, then internal
+# auth_backends.1 = rabbit_auth_backend_http
+# auth_backends.2 = rabbit_auth_backend_internal
+
+## Authentication
+## The built-in mechanisms are 'PLAIN',
+## 'AMQPLAIN', and 'EXTERNAL'. Additional mechanisms can be added via
+## plugins.
+##
+## Related doc guide: https://rabbitmq.com/authentication.html.
+##
+# auth_mechanisms.1 = PLAIN
+# auth_mechanisms.2 = AMQPLAIN
+
+## The rabbitmq-auth-mechanism-ssl plugin makes it possible to
+## authenticate a user based on the client's x509 (TLS) certificate.
+## Related doc guide: https://rabbitmq.com/authentication.html.
+##
+## To use auth-mechanism-ssl, the EXTERNAL mechanism should
+## be enabled:
+##
+# auth_mechanisms.1 = PLAIN
+# auth_mechanisms.2 = AMQPLAIN
+# auth_mechanisms.3 = EXTERNAL
+
+## To force x509 certificate-based authentication on all clients,
+## exclude all other mechanisms (note: this will disable password-based
+## authentication even for the management UI!):
+##
+# auth_mechanisms.1 = EXTERNAL
+
+## This pertains to both the rabbitmq-auth-mechanism-ssl plugin and
+## STOMP ssl_cert_login configurations. See the RabbitMQ STOMP plugin
+## configuration section later in this file and the README in
+## https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
+## details.
+##
+## To use the TLS cert's CN instead of its DN as the username
+##
+# ssl_cert_login_from = common_name
+
+## TLS handshake timeout, in milliseconds.
+##
+# ssl_handshake_timeout = 5000
+
+
+## Cluster name
+##
+# cluster_name = dev3.eng.megacorp.local
+
+## Password hashing implementation. Will only affect newly
+## created users. To recalculate the hash for an existing user,
+## the user's password must be updated.
+##
+## To use SHA-512, set to rabbit_password_hashing_sha512.
+##
+# password_hashing_module = rabbit_password_hashing_sha256
+
+## When importing definitions exported from versions earlier
+## than 3.6.0, it is possible to go back to MD5 (only do this
+## as a temporary measure!) by setting this to rabbit_password_hashing_md5.
+##
+# password_hashing_module = rabbit_password_hashing_md5
+
+##
+## Default User / VHost
+## ====================
+##
+
+## On first start RabbitMQ will create a vhost and a user. These
+## config items control what gets created.
+## Relevant doc guide: https://rabbitmq.com/access-control.html
+##
+# default_vhost = /
+# default_user = guest
+# default_pass = guest
+
+# default_permissions.configure = .*
+# default_permissions.read = .*
+# default_permissions.write = .*
+
+## Tags for default user
+##
+## For more details about tags, see the documentation for the
+## Management Plugin at https://rabbitmq.com/management.html.
+##
+# default_user_tags.administrator = true
+
+## Define other tags like this:
+# default_user_tags.management = true
+# default_user_tags.custom_tag = true
+
+##
+## Additional network and protocol related configuration
+## =====================================================
+##
+
+## Set the default AMQP 0-9-1 heartbeat interval (in seconds).
+## Related doc guides:
+##
+## * https://rabbitmq.com/heartbeats.html
+## * https://rabbitmq.com/networking.html
+##
+# heartbeat = 60
+
+## Set the max permissible size of an AMQP frame (in bytes).
+##
+# frame_max = 131072
+
+## Set the max frame size the server will accept before connection
+## tuning occurs
+##
+# initial_frame_max = 4096
+
+## Set the max permissible number of channels per connection.
+## 0 means "no limit".
+##
+# channel_max = 128
+
+## Customising TCP Listener (Socket) Configuration.
+##
+## Related doc guides:
+##
+## * https://rabbitmq.com/networking.html
+## * https://www.erlang.org/doc/man/inet.html#setopts-2
+##
+
+# tcp_listen_options.backlog = 128
+# tcp_listen_options.nodelay = true
+# tcp_listen_options.exit_on_close = false
+#
+# tcp_listen_options.keepalive = true
+# tcp_listen_options.send_timeout = 15000
+#
+# tcp_listen_options.buffer = 196608
+# tcp_listen_options.sndbuf = 196608
+# tcp_listen_options.recbuf = 196608
+
+##
+## Resource Limits & Flow Control
+## ==============================
+##
+## Related doc guide: https://rabbitmq.com/memory.html.
+
+## Memory-based Flow Control threshold.
+##
+# vm_memory_high_watermark.relative = 0.4
+
+## Alternatively, we can set a limit (in bytes) of RAM used by the node.
+##
+# vm_memory_high_watermark.absolute = 1073741824
+
+## Or you can set absolute value using memory units (with RabbitMQ 3.6.0+).
+## Absolute watermark will be ignored if relative is defined!
+##
+# vm_memory_high_watermark.absolute = 2GB
+##
+## Supported unit symbols:
+##
+## k, kiB: kibibytes (2^10 = 1,024 bytes)
+## M, MiB: mebibytes (2^20 = 1,048,576 bytes)
+## G, GiB: gibibytes (2^30 = 1,073,741,824 bytes)
+## kB: kilobytes (10^3 = 1,000 bytes)
+## MB: megabytes (10^6 = 1,000,000 bytes)
+## GB: gigabytes (10^9 = 1,000,000,000 bytes)
+
+
+
+## Fraction of the high watermark limit at which queues start to
+## page message out to disc in order to free up memory.
+## For example, when vm_memory_high_watermark is set to 0.4 and this value is set to 0.5,
+## paging can begin as early as when 20% of total available RAM is used by the node.
+##
+## Values greater than 1.0 can be dangerous and should be used carefully.
+##
+## One alternative to this is to use durable queues and publish messages
+## as persistent (delivery mode = 2). With this combination queues will
+## move messages to disk much more rapidly.
+##
+## Another alternative is to configure queues to page all messages (both
+## persistent and transient) to disk as quickly
+## as possible, see https://rabbitmq.com/lazy-queues.html.
+##
+# vm_memory_high_watermark_paging_ratio = 0.5
+
+## Selects the Erlang VM memory consumption calculation strategy. Can be `allocated`, `rss` or `legacy` (aliased as `erlang`).
+## Introduced in 3.6.11. `rss` is the default as of 3.6.12.
+## See https://github.com/rabbitmq/rabbitmq-server/issues/1223 and rabbitmq/rabbitmq-common#224 for background.
+# vm_memory_calculation_strategy = rss
+
+## Interval (in milliseconds) at which we perform the check of the memory
+## levels against the watermarks.
+##
+# memory_monitor_interval = 2500
+
+## The total memory available can be calculated from the OS resources
+## - default option - or provided as a configuration parameter.
+# total_memory_available_override_value = 2GB
+
+## Set disk free limit (in bytes). Once free disk space reaches this
+## lower bound, a disk alarm will be set - see the documentation
+## listed above for more details.
+##
+## Absolute watermark will be ignored if relative is defined!
+# disk_free_limit.absolute = 50000
+
+## Or you can set it using memory units (same as in vm_memory_high_watermark)
+## with RabbitMQ 3.6.0+.
+# disk_free_limit.absolute = 500KB
+# disk_free_limit.absolute = 50mb
+# disk_free_limit.absolute = 5GB
+
+## Alternatively, we can set a limit relative to total available RAM.
+##
+## Values lower than 1.0 can be dangerous and should be used carefully.
+# disk_free_limit.relative = 2.0
+
+##
+## Clustering
+## =====================
+##
+# cluster_partition_handling = ignore
+
+## The pause_if_all_down strategy requires additional configuration
+# cluster_partition_handling = pause_if_all_down
+
+## Recover strategy. Can be either 'autoheal' or 'ignore'
+# cluster_partition_handling.pause_if_all_down.recover = ignore
+
+## Node names to check
+# cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@localhost
+# cluster_partition_handling.pause_if_all_down.nodes.2 = hare@localhost
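+
+## The other supported strategies, 'pause_minority' and 'autoheal', require
+## no additional settings:
+# cluster_partition_handling = pause_minority
+# cluster_partition_handling = autoheal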
+
+## Mirror sync batch size, in messages. Increasing this will speed
+## up syncing but total batch size in bytes must not exceed 2 GiB.
+## Available in RabbitMQ 3.6.0 or later.
+##
+# mirroring_sync_batch_size = 4096
+
+## Make clustering happen *automatically* at startup. Only applied
+## to nodes that have just been reset or started for the first time.
+##
+## Relevant doc guide: https://rabbitmq.com/cluster-formation.html
+##
+
+# cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+#
+# cluster_formation.classic_config.nodes.1 = rabbit1@hostname
+# cluster_formation.classic_config.nodes.2 = rabbit2@hostname
+# cluster_formation.classic_config.nodes.3 = rabbit3@hostname
+# cluster_formation.classic_config.nodes.4 = rabbit4@hostname
+
+## DNS-based peer discovery. This backend will list A records
+## of the configured hostname and perform reverse lookups for
+## the addresses returned.
+
+# cluster_formation.peer_discovery_backend = rabbit_peer_discovery_dns
+# cluster_formation.dns.hostname = discovery.eng.example.local
+
+## This node's type can be configured. If you are not sure
+## what node type to use, always use 'disc'.
+# cluster_formation.node_type = disc
+
+## Interval (in milliseconds) at which we send keepalive messages
+## to other cluster members. Note that this is not the same thing
+## as net_ticktime; missed keepalive messages will not cause nodes
+## to be considered down.
+##
+# cluster_keepalive_interval = 10000
+
+##
+## Statistics Collection
+## =====================
+##
+
+## Statistics collection interval (in milliseconds). Increasing
+## this will reduce the load on the management database.
+##
+# collect_statistics_interval = 5000
+
+## Fine vs. coarse statistics
+#
+# This value is no longer meant to be configured directly.
+#
+# See https://www.rabbitmq.com/management.html#fine-stats.
+
+##
+## Ra Settings
+## =====================
+##
+# raft.segment_max_entries = 65536
+# raft.wal_max_size_bytes = 1048576
+# raft.wal_max_batch_size = 4096
+# raft.snapshot_chunk_size = 1000000
+
+##
+## Misc/Advanced Options
+## =====================
+##
+## NB: Change these only if you understand what you are doing!
+##
+
+## Timeout used when waiting for Mnesia tables in a cluster to
+## become available.
+##
+# mnesia_table_loading_retry_timeout = 30000
+
+## Retries when waiting for Mnesia tables during cluster startup. Note that
+## this setting is not applied to Mnesia upgrades or node deletions.
+##
+# mnesia_table_loading_retry_limit = 10
+
+## Size in bytes below which to embed messages in the queue index.
+## Related doc guide: https://rabbitmq.com/persistence-conf.html
+##
+# queue_index_embed_msgs_below = 4096
+
+## You can also set this size in memory units
+##
+# queue_index_embed_msgs_below = 4kb
+
+## Whether or not to enable background periodic forced GC runs for all
+## Erlang processes on the node in "waiting" state.
+##
+## Disabling background GC may reduce latency for client operations,
+## keeping it enabled may reduce median RAM usage by the binary heap
+## (see https://www.erlang-solutions.com/blog/erlang-garbage-collector.html).
+##
+## Before trying this option, please take a look at the memory
+## breakdown (https://www.rabbitmq.com/memory-use.html).
+##
+# background_gc_enabled = false
+
+## Target (desired) interval (in milliseconds) at which we run background GC.
+## The actual interval will vary depending on how long it takes to execute
+## the operation (can be higher than this interval). Values less than
+## 30000 milliseconds are not recommended.
+##
+# background_gc_target_interval = 60000
+
+## Whether or not to enable proxy protocol support.
+## Once enabled, clients cannot directly connect to the broker
+## anymore. They must connect through a load balancer that sends the
+## proxy protocol header to the broker at connection time.
+## This setting applies only to AMQP clients, other protocols
+## like MQTT or STOMP have their own setting to enable proxy protocol.
+## See the plugins documentation for more information.
+##
+# proxy_protocol = false
+
+## Overridden product name and version.
+## They are set to "RabbitMQ" and the release version by default.
+# product.name = RabbitMQ
+# product.version = 1.2.3
+
+## "Message of the day" file.
+## Its content is used to expand the logged and printed banners.
+## Defaults to /etc/rabbitmq/motd on Unix, %APPDATA%\RabbitMQ\motd.txt
+## on Windows.
+# motd_file = /etc/rabbitmq/motd
+
+## ----------------------------------------------------------------------------
+## Advanced Erlang Networking/Clustering Options.
+##
+## Related doc guide: https://rabbitmq.com/clustering.html
+## ----------------------------------------------------------------------------
+
+# ======================================
+# Kernel section
+# ======================================
+
+## Timeout used to detect peer unavailability, including CLI tools.
+## Related doc guide: https://www.rabbitmq.com/nettick.html.
+##
+# net_ticktime = 60
+
+## Inter-node communication port range.
+## The parameters inet_dist_listen_min and inet_dist_listen_max
+## can be configured in the classic config format only.
+## Related doc guide: https://www.rabbitmq.com/networking.html#epmd-inet-dist-port-range.
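+##
+## A minimal sketch in the classic (Erlang terms) format, assuming the
+## conventional single inter-node port (25672). Place it in advanced.config;
+## DO NOT UNCOMMENT it here:
+##
+## [
+##   {kernel, [
+##     {inet_dist_listen_min, 25672},
+##     {inet_dist_listen_max, 25672}
+##   ]}
+## ].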
+
+
+## ----------------------------------------------------------------------------
+## RabbitMQ Management Plugin
+##
+## Related doc guide: https://rabbitmq.com/management.html.
+## ----------------------------------------------------------------------------
+
+# =======================================
+# Management section
+# =======================================
+
+## Preload schema definitions from the following JSON file.
+## Related doc guide: https://rabbitmq.com/management.html#load-definitions.
+##
+# management.load_definitions = /path/to/exported/definitions.json
+
+## Log all requests to the management HTTP API to a file.
+##
+# management.http_log_dir = /path/to/access.log
+
+## HTTP listener and embedded Web server settings.
+## See https://rabbitmq.com/management.html for details.
+#
+# management.tcp.port = 15672
+# management.tcp.ip = 0.0.0.0
+#
+# management.tcp.shutdown_timeout = 7000
+# management.tcp.max_keepalive = 120
+# management.tcp.idle_timeout = 120
+# management.tcp.inactivity_timeout = 120
+# management.tcp.request_timeout = 120
+# management.tcp.compress = true
+
+## HTTPS listener settings.
+## See https://rabbitmq.com/management.html and https://rabbitmq.com/ssl.html for details.
+##
+# management.ssl.port = 15671
+# management.ssl.cacertfile = /path/to/ca_certificate.pem
+# management.ssl.certfile = /path/to/server_certificate.pem
+# management.ssl.keyfile = /path/to/server_key.pem
+
+## More TLS options
+# management.ssl.honor_cipher_order = true
+# management.ssl.honor_ecc_order = true
+# management.ssl.client_renegotiation = false
+# management.ssl.secure_renegotiate = true
+
+## Supported TLS versions
+# management.ssl.versions.1 = tlsv1.2
+# management.ssl.versions.2 = tlsv1.1
+
+## Cipher suites the server is allowed to use
+# management.ssl.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+# management.ssl.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
+# management.ssl.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
+# management.ssl.ciphers.4 = ECDHE-RSA-AES256-SHA384
+# management.ssl.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
+# management.ssl.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
+# management.ssl.ciphers.7 = ECDH-ECDSA-AES256-SHA384
+# management.ssl.ciphers.8 = ECDH-RSA-AES256-SHA384
+# management.ssl.ciphers.9 = DHE-RSA-AES256-GCM-SHA384
+
+## URL path prefix for HTTP API and management UI
+# management.path_prefix = /a-prefix
+
+## One of 'basic', 'detailed' or 'none'. See
+## https://rabbitmq.com/management.html#fine-stats for more details.
+# management.rates_mode = basic
+
+## Configure how long aggregated data (such as message rates and queue
+## lengths) is retained. Please read the plugin's documentation in
+## https://rabbitmq.com/management.html#configuration for more
+## details.
+## You can use 'minute', 'hour' and 'day' keys or an integer key (in seconds)
+# management.sample_retention_policies.global.minute = 5
+# management.sample_retention_policies.global.hour = 60
+# management.sample_retention_policies.global.day = 1200
+
+# management.sample_retention_policies.basic.minute = 5
+# management.sample_retention_policies.basic.hour = 60
+
+# management.sample_retention_policies.detailed.10 = 5
+
+## ----------------------------------------------------------------------------
+## RabbitMQ Shovel Plugin
+##
+## Related doc guide: https://rabbitmq.com/shovel.html
+## ----------------------------------------------------------------------------
+
+## See advanced.config.example for a Shovel plugin example
+
+
+## ----------------------------------------------------------------------------
+## RabbitMQ STOMP Plugin
+##
+## Related doc guide: https://rabbitmq.com/stomp.html
+## ----------------------------------------------------------------------------
+
+# =======================================
+# STOMP section
+# =======================================
+
+## See https://rabbitmq.com/stomp.html for details.
+
+## TCP listeners.
+##
+# stomp.listeners.tcp.1 = 127.0.0.1:61613
+# stomp.listeners.tcp.2 = ::1:61613
+
+## TCP listener settings
+##
+# stomp.tcp_listen_options.backlog = 2048
+# stomp.tcp_listen_options.recbuf = 131072
+# stomp.tcp_listen_options.sndbuf = 131072
+#
+# stomp.tcp_listen_options.keepalive = true
+# stomp.tcp_listen_options.nodelay = true
+#
+# stomp.tcp_listen_options.exit_on_close = true
+# stomp.tcp_listen_options.send_timeout = 120
+
+## Proxy protocol support
+##
+# stomp.proxy_protocol = false
+
+## TLS listeners
+## See https://rabbitmq.com/stomp.html and https://rabbitmq.com/ssl.html for details.
+# stomp.listeners.ssl.default = 61614
+#
+# ssl_options.cacertfile = path/to/cacert.pem
+# ssl_options.certfile = path/to/cert.pem
+# ssl_options.keyfile = path/to/key.pem
+# ssl_options.verify = verify_peer
+# ssl_options.fail_if_no_peer_cert = true
+
+
+## Number of Erlang processes that will accept connections for the TCP
+## and TLS listeners.
+##
+# stomp.num_acceptors.tcp = 10
+# stomp.num_acceptors.ssl = 1
+
+## Additional TLS options
+
+## Extract a name from the client's certificate when using TLS.
+##
+# stomp.ssl_cert_login = true
+
+## Set a default user name and password. This is used as the default login
+## whenever a CONNECT frame omits the login and passcode headers.
+##
+## Please note that setting this will allow clients to connect without
+## authenticating!
+##
+# stomp.default_user = guest
+# stomp.default_pass = guest
+
+## If a default user is configured, or you have configured TLS client
+## certificate based authentication, you can choose to allow clients to
+## omit the CONNECT frame entirely. If set to true, the client is
+## automatically connected as the default user or user supplied in the
+## TLS certificate whenever the first frame sent on a session is not a
+## CONNECT frame.
+##
+# stomp.implicit_connect = true
+
+## Whether or not to enable proxy protocol support.
+## Once enabled, clients cannot directly connect to the broker
+## anymore. They must connect through a load balancer that sends the
+## proxy protocol header to the broker at connection time.
+## This setting applies only to STOMP clients, other protocols
+## like MQTT or AMQP have their own setting to enable proxy protocol.
+## See the plugins or broker documentation for more information.
+##
+# stomp.proxy_protocol = false
+
+## ----------------------------------------------------------------------------
+## RabbitMQ MQTT Adapter
+##
+## See https://github.com/rabbitmq/rabbitmq-mqtt/blob/stable/README.md
+## for details
+## ----------------------------------------------------------------------------
+
+# =======================================
+# MQTT section
+# =======================================
+
+## TCP listeners.
+##
+# mqtt.listeners.tcp.1 = 127.0.0.1:1883
+# mqtt.listeners.tcp.2 = ::1:1883
+
+## TCP listener options (as per the broker configuration).
+##
+# mqtt.tcp_listen_options.backlog = 4096
+# mqtt.tcp_listen_options.recbuf = 131072
+# mqtt.tcp_listen_options.sndbuf = 131072
+#
+# mqtt.tcp_listen_options.keepalive = true
+# mqtt.tcp_listen_options.nodelay = true
+#
+# mqtt.tcp_listen_options.exit_on_close = true
+# mqtt.tcp_listen_options.send_timeout = 120
+
+## TLS listener settings
+## See https://rabbitmq.com/mqtt.html and https://rabbitmq.com/ssl.html for details.
+#
+# mqtt.listeners.ssl.default = 8883
+#
+# ssl_options.cacertfile = /path/to/tls/ca_certificate_bundle.pem
+# ssl_options.certfile = /path/to/tls/server_certificate.pem
+# ssl_options.keyfile = /path/to/tls/server_key.pem
+# ssl_options.verify = verify_peer
+# ssl_options.fail_if_no_peer_cert = true
+#
+
+
+## Number of Erlang processes that will accept connections for the TCP
+## and TLS listeners.
+##
+# mqtt.num_acceptors.tcp = 10
+# mqtt.num_acceptors.ssl = 10
+
+## Whether or not to enable proxy protocol support.
+## Once enabled, clients cannot directly connect to the broker
+## anymore. They must connect through a load balancer that sends the
+## proxy protocol header to the broker at connection time.
+## This setting applies only to MQTT clients, other protocols
+## like STOMP or AMQP have their own setting to enable proxy protocol.
+## See the plugins or broker documentation for more information.
+##
+# mqtt.proxy_protocol = false
+
+## Set the default user name and password used for anonymous connections (when client
+## provides no credentials). Anonymous connections are highly discouraged!
+##
+# mqtt.default_user = guest
+# mqtt.default_pass = guest
+
+## Enable anonymous connections. If this is set to false, clients MUST provide
+## credentials in order to connect. See also the mqtt.default_user/mqtt.default_pass
+## keys. Anonymous connections are highly discouraged!
+##
+# mqtt.allow_anonymous = true
+
+## If you have multiple vhosts, specify the one to which the
+## adapter connects.
+##
+# mqtt.vhost = /
+
+## Specify the exchange to which messages from MQTT clients are published.
+##
+# mqtt.exchange = amq.topic
+
+## Specify TTL (time to live) to control the lifetime of non-clean sessions.
+##
+# mqtt.subscription_ttl = 1800000
+
+## Set the prefetch count (governing the maximum number of unacknowledged
+## messages that will be delivered).
+##
+# mqtt.prefetch = 10
+
+
+## ----------------------------------------------------------------------------
+## RabbitMQ AMQP 1.0 Support
+##
+## See https://github.com/rabbitmq/rabbitmq-amqp1.0/blob/stable/README.md.
+## ----------------------------------------------------------------------------
+
+# =======================================
+# AMQP 1.0 section
+# =======================================
+
+
+## Connections that are not authenticated with SASL will connect as this
+## account. See the README for more information.
+##
+## Please note that setting this will allow clients to connect without
+## authenticating!
+##
+# amqp1_0.default_user = guest
+
+## Enable protocol strict mode. See the README for more information.
+##
+# amqp1_0.protocol_strict_mode = false
+
+## Logging settings.
+##
+## See https://rabbitmq.com/logging.html and https://github.com/erlang-lager/lager for details.
+##
+
+## Log directory, taken from the RABBITMQ_LOG_BASE env variable by default.
+##
+# log.dir = /var/log/rabbitmq
+
+## Logging to file. Can be false or a filename.
+## Default:
+# log.file = rabbit.log
+
+## To disable logging to a file
+# log.file = false
+
+## Log level for file logging
+##
+# log.file.level = info
+
+## File rotation config. No rotation by default.
+## DO NOT SET the rotation date to ''. If '' is the desired value, leave the setting unset instead.
+# log.file.rotation.date = $D0
+# log.file.rotation.size = 0
+
+## Logging to console (can be true or false)
+##
+# log.console = false
+
+## Log level for console logging
+##
+# log.console.level = info
+
+## Logging to the amq.rabbitmq.log exchange (can be true or false)
+##
+# log.exchange = false
+
+## Log level to use when logging to the amq.rabbitmq.log exchange
+##
+# log.exchange.level = info
+
+
+
+## ----------------------------------------------------------------------------
+## RabbitMQ LDAP Plugin
+##
+## Related doc guide: https://rabbitmq.com/ldap.html.
+##
+## ----------------------------------------------------------------------------
+
+# =======================================
+# LDAP section
+# =======================================
+
+##
+## Connecting to the LDAP server(s)
+## ================================
+##
+
+## Specify servers to bind to. You *must* set this in order for the plugin
+## to work properly.
+##
+# auth_ldap.servers.1 = your-server-name-goes-here
+
+## You can define multiple servers
+# auth_ldap.servers.2 = your-other-server
+
+## Connect to the LDAP server using TLS
+##
+# auth_ldap.use_ssl = false
+
+## Specify the LDAP port to connect to
+##
+# auth_ldap.port = 389
+
+## LDAP connection timeout, in milliseconds or 'infinity'
+##
+# auth_ldap.timeout = infinity
+
+## Or a number of milliseconds
+# auth_ldap.timeout = 500
+
+## Enable logging of LDAP queries.
+## One of
+## - false (no logging is performed)
+## - true (verbose logging of the logic used by the plugin)
+## - network (as true, but additionally logs LDAP network traffic)
+##
+## Defaults to false.
+##
+# auth_ldap.log = false
+
+## It can also be set to true or network
+# auth_ldap.log = true
+# auth_ldap.log = network
+
+##
+## Authentication
+## ==============
+##
+
+## Pattern to convert the username given through AMQP to a DN before
+## binding
+##
+# auth_ldap.user_dn_pattern = cn=${username},ou=People,dc=example,dc=com
+
+## Alternatively, you can convert a username to a Distinguished
+## Name via an LDAP lookup after binding. See the documentation for
+## full details.
+
+## When converting a username to a dn via a lookup, set these to
+## the name of the attribute that represents the user name, and the
+## base DN for the lookup query.
+##
+# auth_ldap.dn_lookup_attribute = userPrincipalName
+# auth_ldap.dn_lookup_base = DC=gopivotal,DC=com
+
+## Controls how to bind for authorisation queries and also to
+## retrieve the details of users logging in without presenting a
+## password (e.g., SASL EXTERNAL).
+## One of
+## - as_user (to bind as the authenticated user - requires a password)
+## - anon (to bind anonymously)
+## - {UserDN, Password} (to bind with a specified user name and password)
+##
+## Defaults to 'as_user'.
+##
+# auth_ldap.other_bind = as_user
+
+## Or can be more complex:
+# auth_ldap.other_bind.user_dn = User
+# auth_ldap.other_bind.password = Password
+
+## If user_dn and password are defined, the other_bind value above is ignored.
+
+# -----------------------------
+# LDAP settings too complex for this format (see advanced.config)
+# -----------------------------
+
+##
+## Authorisation
+## =============
+##
+
+## The LDAP plugin can perform a variety of queries against your
+## LDAP server to determine questions of authorisation.
+##
+## Related doc guide: https://rabbitmq.com/ldap.html#authorisation.
+
+## The following configuration should be defined in the advanced.config file.
+## DO NOT UNCOMMENT THESE LINES!
+
+## Set the query to use when determining vhost access
+##
+## {vhost_access_query, {in_group,
+## "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}},
+
+## Set the query to use when determining resource (e.g., queue) access
+##
+## {resource_access_query, {constant, true}},
+
+## Set queries to determine which tags a user has
+##
+## {tag_queries, []}
+# ]},
+# -----------------------------
diff --git a/deps/rabbit/docs/rabbitmqctl.8 b/deps/rabbit/docs/rabbitmqctl.8
new file mode 100644
index 0000000000..3e041ad2c8
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmqctl.8
@@ -0,0 +1,2424 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQCTL 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmqctl
+.Nd tool for managing RabbitMQ nodes
+.\" ------------------------------------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------------------------------------
+.Nm
+.Op Fl q
+.Op Fl s
+.Op Fl l
+.Op Fl n Ar node
+.Op Fl t Ar timeout
+.Ar command
+.Op Ar command_options
+.\" ------------------------------------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------------------------------------
+RabbitMQ is an open source multi-protocol messaging broker.
+.Pp
+.Nm
+is a command line tool for managing a RabbitMQ server node.
+It performs all actions by connecting to the target RabbitMQ node
+on a dedicated CLI tool communication port and authenticating
+using a shared secret (known as the cookie file).
+.Pp
+Diagnostic information is displayed if the connection failed,
+the target node was not running, or
+.Nm
+could not authenticate to
+the target node successfully.
+To learn more, see the
+.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+and
+.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide"
+.\" ------------------------------------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl n Ar node
+Default node is
+.Qq Ar rabbit@target-hostname ,
+where
+.Ar target-hostname
+is the local host.
+On a host named
+.Qq myserver.example.com ,
+the node name will usually be
+.Qq rabbit@myserver
+(unless
+.Ev RABBITMQ_NODENAME
+has been overridden).
+The output of
+.Qq hostname -s
+is usually the correct suffix to use after the
+.Qq @
+sign.
+See
+.Xr rabbitmq-server 8
+for details of configuring a RabbitMQ node.
+.It Fl q , -quiet
+Quiet output mode is selected.
+Informational messages are reduced when quiet mode is in effect.
+.It Fl s , -silent
+Silent output mode is selected.
+Informational messages are reduced and table headers are suppressed when silent mode is in effect.
+.It Fl -no-table-headers
+Do not output headers for tabular data.
+.It Fl -dry-run
+Do not run the command.
+Only print information message.
+.It Fl t Ar timeout , Fl -timeout Ar timeout
+Operation timeout in seconds.
+Not all commands support timeouts.
+Default is
+.Cm infinity .
+.It Fl l , Fl -longnames
+Must be specified when the cluster is configured to use long (FQDN) node names.
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Fl -erlang-cookie Ar cookie
+Shared secret to use to authenticate to the target node.
+Prefer using a local file or the
+.Ev RABBITMQ_ERLANG_COOKIE
+environment variable instead of specifying this option on the command line.
+To learn more, see the
+.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+.El
+.\" ------------------------------------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Cm help Oo Fl l Oc Op Ar command_name
+.Pp
+Prints usage for all available commands.
+.Bl -tag -width Ds
+.It Fl l , Fl -list-commands
+List command usages only, without parameter explanation.
+.It Ar command_name
+Prints usage for the specified command.
+.El
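+.Pp
+For example, this command displays usage information for the
+.Cm shutdown
+command:
+.sp
+.Dl rabbitmqctl help shutdown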
+.\" ------------------------------------------------------------------
+.It Cm version
+.Pp
+Displays the version of the CLI tools.
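+.Pp
+For example:
+.sp
+.Dl rabbitmqctl version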
+.El
+.\" ------------------------------------------------------------------
+.\" ## Nodes
+.\" ------------------------------------------------------------------
+.Ss Nodes
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm await_startup
+.Pp
+Waits for the RabbitMQ application to start on the target node
+.Pp
+For example, to wait for the RabbitMQ application to start:
+.sp
+.Dl rabbitmqctl await_startup
+.\" ------------------------------------------------------------------
+.It Cm reset
+.Pp
+Returns a RabbitMQ node to its virgin state.
+.Pp
+Removes the node from any cluster it belongs to, removes all data from
+the management database, such as configured users and vhosts, and
+deletes all persistent messages.
+.Pp
+For
+.Cm reset
+and
+.Cm force_reset
+to succeed the RabbitMQ application must have been stopped, e.g. with
+.Cm stop_app .
+.Pp
+For example, to reset the RabbitMQ node:
+.sp
+.Dl rabbitmqctl reset
+.\" ------------------------------------------------------------------
+.It Cm rotate_logs
+.Pp
+Instructs the RabbitMQ node to perform internal log rotation.
+.Pp
+Log rotation is performed according to lager settings specified in
+configuration file.
+.Pp
+Note that there is no need to call this command in case of external log
+rotation (e.g. from logrotate(8)), because lager detects renames and
+automatically reopens log files.
+.Pp
+For example, this command starts internal log rotation
+process:
+.sp
+.Dl rabbitmqctl rotate_logs
+.Pp
+Rotation is performed asynchronously, so there is no guarantee that it
+will be completed when this command returns.
+.\" ------------------------------------------------------------------
+.It Cm shutdown
+.Pp
+Shuts down the node, both RabbitMQ and its runtime.
+The command is blocking and will return after the runtime process exits.
+If RabbitMQ fails to stop, it will return a non-zero exit code.
+This command infers the OS PID of the target node and
+therefore can only be used to shut down nodes running on the same
+host (or broadly speaking, in the same operating system,
+e.g. in the same VM or container)
+.Pp
+Unlike the stop command, the shutdown command:
+.Bl -bullet
+.It
+does not require a
+.Ar pid_file
+to wait for the runtime process to exit
+.It
+returns a non-zero exit code if RabbitMQ node is not running
+.El
+.Pp
+For example, this will shut down a locally running RabbitMQ node
+with default node name:
+.sp
+.Dl rabbitmqctl shutdown
+.\" ------------------------------------------------------------------
+.It Cm start_app
+.Pp
+Starts the RabbitMQ application.
+.Pp
+This command is typically run after performing other management actions
+that required the RabbitMQ application to be stopped, e.g.\&
+.Cm reset .
+.Pp
+For example, to instruct the RabbitMQ node to start the RabbitMQ
+application:
+.sp
+.Dl rabbitmqctl start_app
+.\" ------------------------------------------------------------------
+.It Cm stop Op Ar pid_file
+.Pp
+Stops the Erlang node on which RabbitMQ is running.
+To restart the node follow the instructions for
+.Qq Running the Server
+in the
+.Lk https://rabbitmq.com/download.html installation guide .
+.Pp
+If a
+.Ar pid_file
+is specified, also waits for the process specified there to terminate.
+See the description of the
+.Cm wait
+command for details on this file.
+.Pp
+For example, to instruct the RabbitMQ node to terminate:
+.sp
+.Dl rabbitmqctl stop
+.\" ------------------------------------------------------------------
+.It Cm stop_app
+.Pp
+Stops the RabbitMQ application, leaving the runtime (Erlang VM) running.
+.Pp
+This command is typically run prior to performing other management
+actions that require the RabbitMQ application to be stopped, e.g.\&
+.Cm reset .
+.Pp
+For example, to instruct the RabbitMQ node to stop the RabbitMQ
+application:
+.sp
+.Dl rabbitmqctl stop_app
+.\" ------------------------------------------------------------------
+.It Cm wait Ar pid_file , Cm wait Fl -pid Ar pid
+.Pp
+Waits for the RabbitMQ application to start.
+.Pp
+This command will wait for the RabbitMQ application to start at the
+node.
+It will wait for the pid file to be created if
+.Ar pid_file
+is specified, then for a process with a pid specified in the pid file or
+the
+.Fl -pid
+argument, and then for the RabbitMQ application to start in that process.
+It will fail if the process terminates without starting the RabbitMQ
+application.
+.Pp
+If the specified pid file is not created or the Erlang node is not started within
+.Fl -timeout
+the command will fail.
+Default timeout is 10 seconds.
+.Pp
+A suitable pid file is created by the
+.Xr rabbitmq-server 8
+script.
+By default this is located in the Mnesia directory.
+Modify the
+.Ev RABBITMQ_PID_FILE
+environment variable to change the location.
+.Pp
+For example, this command will return when the RabbitMQ node has started
+up:
+.sp
+.Dl rabbitmqctl wait /var/run/rabbitmq/pid
+.El
+.\" ------------------------------------------------------------------
+.\" ## Cluster Operations
+.\" ------------------------------------------------------------------
+.Ss Cluster management
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm await_online_nodes Ar count
+.Pp
+Waits for
+.Ar count
+nodes to join the cluster
+.Pp
+For example, to wait for two RabbitMQ nodes to start:
+.sp
+.Dl rabbitmqctl await_online_nodes 2
+.\" ------------------------------------------------------------------
+.It Cm change_cluster_node_type Ar type
+.Pp
+Changes the type of the cluster node.
+.Pp
+The
+.Ar type
+must be one of the following:
+.Bl -bullet -compact
+.It
+.Cm disc
+.It
+.Cm ram
+.El
+.Pp
+The node must be stopped for this operation to succeed, and when turning
+a node into a RAM node the node must not be the only disc node in the
+cluster.
+.Pp
+For example, this command will turn a RAM node into a disc node:
+.sp
+.Dl rabbitmqctl change_cluster_node_type disc
+.\" ------------------------------------------------------------------
+.It Cm cluster_status
+.Pp
+Displays all the nodes in the cluster grouped by node type, together
+with the currently running nodes.
+.Pp
+For example, this command displays the nodes in the cluster:
+.sp
+.Dl rabbitmqctl cluster_status
+.\" ------------------------------------------------------------------
+.It Cm force_boot
+.Pp
+Ensures that the node will start next time, even if it was not the last
+to shut down.
+.Pp
+Normally when you shut down a RabbitMQ cluster altogether, the first
+node you restart should be the last one to go down, since it may have
+seen things happen that other nodes did not.
+But sometimes that's not possible: for instance if the entire cluster
+loses power then all nodes may think they were not the last to shut
+down.
+.Pp
+In such a case you can invoke
+.Cm force_boot
+while the node is down.
+This will tell the node to unconditionally start next time you ask it
+to.
+If any changes happened to the cluster after this node shut down, they
+will be lost.
+.Pp
+If the last node to go down is permanently lost then you should use
+.Cm forget_cluster_node Fl -offline
+in preference to this command, as it will ensure that mirrored queues
+which were mastered on the lost node get promoted.
+.Pp
+For example, this will force the node not to wait for other nodes next
+time it is started:
+.sp
+.Dl rabbitmqctl force_boot
+.\" ------------------------------------------------------------------
+.It Cm force_reset
+.Pp
+Forcefully returns a RabbitMQ node to its virgin state.
+.Pp
+The
+.Cm force_reset
+command differs from
+.Cm reset
+in that it resets the node unconditionally, regardless of the current
+management database state and cluster configuration.
+It should only be used as a last resort if the database or cluster
+configuration has been corrupted.
+.Pp
+For
+.Cm reset
+and
+.Cm force_reset
+to succeed the RabbitMQ application must have been stopped, e.g. with
+.Cm stop_app .
+.Pp
+For example, to reset the RabbitMQ node:
+.sp
+.Dl rabbitmqctl force_reset
+.\" ------------------------------------------------------------------
+.It Cm forget_cluster_node Op Fl -offline
+.Bl -tag -width Ds
+.It Fl -offline
+Enables node removal from an offline node.
+This is only useful in the situation where all the nodes are offline and
+the last node to go down cannot be brought online, thus preventing the
+whole cluster from starting.
+It should not be used in any other circumstances since it can lead to
+inconsistencies.
+.El
+.Pp
+Removes a cluster node remotely.
+The node that is being removed must be offline, while the node we are
+removing from must be online, except when using the
+.Fl -offline
+flag.
+.Pp
+When using the
+.Fl -offline
+flag,
+.Nm
+will not attempt to connect to a node as normal; instead it will
+temporarily become the node in order to make the change.
+This is useful if the node cannot be started normally.
+In this case the node will become the canonical source for cluster
+metadata (e.g. which queues exist), even if it was not before.
+Therefore you should use this command on the latest node to shut down if
+at all possible.
+.Pp
+For example, this command will remove the node
+.Qq rabbit@stringer
+from the node
+.Qq hare@mcnulty :
+.sp
+.Dl rabbitmqctl -n hare@mcnulty forget_cluster_node rabbit@stringer
+.\" ------------------------------------------------------------------
+.It Cm join_cluster Ar seed-node Op Fl -ram
+.Bl -tag -width Ds
+.It Ar seed-node
+Existing cluster member (seed node) to cluster with.
+.It Fl -ram
+If provided, the node will join the cluster as a RAM node.
+RAM node use is discouraged. Use them only if you understand
+exactly why you need them.
+.El
+.Pp
+Instructs the node to become a member of the cluster that the specified
+node is in.
+Before clustering, the node is reset, so be careful when using this
+command.
+For this command to succeed the RabbitMQ application must have been
+stopped, e.g. with
+.Cm stop_app .
+.Pp
+Cluster nodes can be of two types: disc or RAM.
+Disc nodes replicate data in RAM and on disc, thus providing redundancy
+in the event of node failure and recovery from global events such as
+power failure across all nodes.
+RAM nodes replicate data in RAM only (with the exception of queue
+contents, which can reside on disc if the queue is persistent or too big
+to fit in memory) and are mainly used for scalability.
+RAM nodes are more performant only when managing resources (e.g.\&
+adding/removing queues, exchanges, or bindings).
+A cluster must always have at least one disc node, and usually should
+have more than one.
+.Pp
+The node will be a disc node by default.
+If you wish to create a RAM node, provide the
+.Fl -ram
+flag.
+.Pp
+After executing the
+.Cm join_cluster
+command, whenever the RabbitMQ application is started on the current
+node it will attempt to connect to the nodes that were in the cluster
+when the node went down.
+.Pp
+To leave a cluster,
+.Cm reset
+the node.
+You can also remove nodes remotely with the
+.Cm forget_cluster_node
+command.
+.Pp
+For example, this command instructs the RabbitMQ node to join the cluster that
+.Qq hare@elena
+is part of, as a RAM node:
+.sp
+.Dl rabbitmqctl join_cluster hare@elena --ram
+.Pp
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide".
+.\" ------------------------------------------------------------------
+.It Cm rename_cluster_node Ar oldnode1 Ar newnode1 Op Ar oldnode2 Ar newnode2 ...
+.Pp
+Supports renaming of cluster nodes in the local database.
+.Pp
+This subcommand causes
+.Nm
+to temporarily become the node in order to make the change.
+The local cluster node must therefore be completely stopped; other nodes
+can be online or offline.
+.Pp
+This subcommand takes an even number of arguments, in pairs representing
+the old and new names for nodes.
+You must specify the old and new names for this node and for any other
+nodes that are stopped and being renamed at the same time.
+.Pp
+It is possible to stop all nodes and rename them all simultaneously (in
+which case old and new names for all nodes must be given to every node)
+or stop and rename nodes one at a time (in which case each node only
+needs to be told how its own name is changing).
+.Pp
+For example, this command will rename the node
+.Qq rabbit@misshelpful
+to the node
+.Qq rabbit@cordelia
+.sp
+.Dl rabbitmqctl rename_cluster_node rabbit@misshelpful rabbit@cordelia
+.Pp
+Note that this command only changes the local database.
+It may also be necessary to rename the local database directories,
+and to configure the new node name.
+For example:
+.sp
+.Bl -enum -compact
+.It
+Stop the node:
+.sp
+.Dl rabbitmqctl stop rabbit@misshelpful
+.sp
+.It
+Rename the node in the local database:
+.sp
+.Dl rabbitmqctl rename_cluster_node rabbit@misshelpful rabbit@cordelia
+.sp
+.It
+Rename the local database directories (note, you do not need to do this
+if you have set the RABBITMQ_MNESIA_DIR environment variable):
+.sp
+.Bd -literal -offset indent -compact
+mv \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@misshelpful \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@cordelia
+mv \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@misshelpful-rename \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@cordelia-rename
+mv \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@misshelpful-plugins-expand \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@cordelia-plugins-expand
+.Ed
+.sp
+.It
+If the node name is configured, e.g. using
+.Pa /etc/rabbitmq/rabbitmq-env.conf ,
+it must also be updated there.
+.sp
+.It
+Start the node when ready
+.El
+.\" ------------------------------------------------------------------
+.It Cm update_cluster_nodes Ar clusternode
+.Bl -tag -width Ds
+.It Ar clusternode
+The node to consult for up-to-date information.
+.El
+.Pp
+Instructs an already clustered node to contact
+.Ar clusternode
+to cluster when booting up.
+This is different from
+.Cm join_cluster
+since it does not join any cluster - it checks that the node is already
+in a cluster with
+.Ar clusternode .
+.Pp
+The need for this command is motivated by the fact that clusters can
+change while a node is offline.
+Consider a situation where node
+.Va rabbit@A
+and
+.Va rabbit@B
+are clustered.
+.Va rabbit@A
+goes down,
+.Va rabbit@C
+clusters with
+.Va rabbit@B ,
+and then
+.Va rabbit@B
+leaves the cluster.
+When
+.Va rabbit@A
+starts back up, it'll try to contact
+.Va rabbit@B ,
+but this will fail since
+.Va rabbit@B
+is not in the cluster anymore.
+The following command instructs node
+.Va rabbit@A
+to consult node
+.Va rabbit@C
+for up-to-date cluster information when it boots:
+.sp
+.Dl rabbitmqctl -n rabbit@A update_cluster_nodes rabbit@C
+.Pp
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.El
+.\" ------------------------------------------------------------------
+.\" ## Classic Mirrored Queues
+.\" ------------------------------------------------------------------
+.Ss Replication
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm sync_queue Oo Fl p Ar vhost Oc Ar queue
+.Bl -tag -width Ds
+.It Ar queue
+The name of the queue to synchronise.
+.El
+.Pp
+Instructs a mirrored queue with unsynchronised mirrors (follower replicas)
+to synchronise them.
+The queue will block while synchronisation takes place (all publishers
+to and consumers using the queue will block or temporarily see no activity).
+This command can only be used with mirrored queues.
+To learn more, see the
+.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Mirroring guide"
+.Pp
+Note that queues with unsynchronised replicas and active consumers
+will become synchronised eventually (assuming that consumers make progress).
+This command is primarily useful for queues which do not have active consumers.
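+.Pp
+For example, assuming a mirrored queue named
+.Qq orders
+exists in the default virtual host, this command synchronises it:
+.sp
+.Dl rabbitmqctl sync_queue orders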
+.\" ------------------------------------------------------------------
+.It Cm cancel_sync_queue Oo Fl p Ar vhost Oc Ar queue
+.Bl -tag -width Ds
+.It Ar queue
+The name of the queue to cancel synchronisation for.
+.El
+.Pp
+Instructs a synchronising mirrored queue to stop synchronising itself.
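+.Pp
+For example, assuming the same
+.Qq my-queue
+queue in
+.Qq my-vhost ,
+this command stops an in-progress synchronisation:
+.sp
+.Dl rabbitmqctl cancel_sync_queue -p my-vhost my-queue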
+.El
+.\" ------------------------------------------------------------------
+.\" ## User management
+.\" ------------------------------------------------------------------
+.Ss User Management
+Note that
+.Nm
+can only manage users in the internal RabbitMQ database.
+Users from any alternative authentication backend such as LDAP cannot be inspected
+or managed with these commands.
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm add_user Ar username Ar password
+.Bl -tag -width Ds
+.It Ar username
+The name of the user to create.
+.It Ar password
+The password the created user will use to log in to the broker.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to create a (non-administrative) user named
+.Qq janeway
+with (initial) password
+.Qq changeit :
+.sp
+.Dl rabbitmqctl add_user janeway changeit
+.\" ------------------------------------------------------------------
+.It Cm authenticate_user Ar username Ar password
+.Bl -tag -width Ds
+.It Ar username
+The name of the user.
+.It Ar password
+The password of the user.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to authenticate the user named
+.Qq janeway
+with password
+.Qq verifyit :
+.sp
+.Dl rabbitmqctl authenticate_user janeway verifyit
+.\" ------------------------------------------------------------------
+.It Cm change_password Ar username Ar newpassword
+.Bl -tag -width Ds
+.It Ar username
+The name of the user whose password is to be changed.
+.It Ar newpassword
+The new password for the user.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to change the
+password for the user named
+.Qq janeway
+to
+.Qq newpass :
+.sp
+.Dl rabbitmqctl change_password janeway newpass
+.\" ------------------------------------------------------------------
+.It Cm clear_password Ar username
+.Bl -tag -width Ds
+.It Ar username
+The name of the user whose password is to be cleared.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to clear the
+password for the user named
+.Qq janeway :
+.sp
+.Dl rabbitmqctl clear_password janeway
+.Pp
+This user now cannot log in with a password (but may be able to through
+e.g. SASL EXTERNAL if configured).
+.\" ------------------------------------------------------------------
+.It Cm delete_user Ar username
+.Bl -tag -width Ds
+.It Ar username
+The name of the user to delete.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to delete the user named
+.Qq janeway :
+.sp
+.Dl rabbitmqctl delete_user janeway
+.\" ------------------------------------------------------------------
+.It Cm list_users
+.Pp
+Lists users.
+Each result row will contain the user name followed by a list of the
+tags set for that user.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all users:
+.sp
+.Dl rabbitmqctl list_users
+.\" ------------------------------------------------------------------
+.It Cm set_user_tags Ar username Op Ar tag ...
+.Bl -tag -width Ds
+.It Ar username
+The name of the user whose tags are to be set.
+.It Ar tag
+Zero, one or more tags to set.
+Any existing tags will be removed.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to ensure the user named
+.Qq janeway
+is an administrator:
+.sp
+.Dl rabbitmqctl set_user_tags janeway administrator
+.Pp
+This has no effect when the user logs in via AMQP, but can be used to
+permit the user to manage users, virtual hosts and permissions when
+the user logs in via some other means (for example with the management
+plugin).
+.Pp
+This command instructs the RabbitMQ broker to remove any tags from the user named
+.Qq janeway :
+.sp
+.Dl rabbitmqctl set_user_tags janeway
+.El
+.\" ------------------------------------------------------------------
+.\" ## Access Control
+.\" ------------------------------------------------------------------
+.Ss Access control
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm clear_permissions Oo Fl p Ar vhost Oc Ar username
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host to which to deny the user access,
+defaulting to
+.Qq / .
+.It Ar username
+The name of the user to deny access to the specified virtual host.
+.El
+.Pp
+Clears user permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to deny the user
+named
+.Qq janeway
+access to the virtual host called
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl clear_permissions -p my-vhost janeway
+.\" ------------------------------------------------------------------
+.It Cm clear_topic_permissions Oo Fl p Ar vhost Oc Ar username Oo Ar exchange Oc
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host to which to clear the topic permissions,
+defaulting to
+.Qq / .
+.It Ar username
+The name of the user whose topic permissions will be cleared in the specified virtual host.
+.It Ar exchange
+The name of the topic exchange for which to clear topic permissions, defaulting to all the
+topic exchanges the given user has topic permissions for.
+.El
+.Pp
+Clears user topic permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to remove topic permissions for user
+named
+.Qq janeway
+for the topic exchange
+.Qq amq.topic
+in the virtual host called
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl clear_topic_permissions -p my-vhost janeway amq.topic
+.\" ------------------------------------------------------------------
+.It Cm list_permissions Op Fl p Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host for which to list the users that have been
+granted access to it, and their permissions.
+Defaults to
+.Qq / .
+.El
+.Pp
+Lists permissions in a virtual host.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all the
+users which have been granted access to the virtual host called
+.Qq my-vhost ,
+and the permissions they have for operations on resources in that
+virtual host.
+Note that an empty string means no permissions granted:
+.sp
+.Dl rabbitmqctl list_permissions -p my-vhost
+.\" ------------------------------------------------------------------
+.It Cm list_topic_permissions Op Fl p Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host for which to list the users' topic permissions.
+Defaults to
+.Qq / .
+.El
+.Pp
+Lists topic permissions in a virtual host.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all the
+users which have been granted topic permissions in the virtual host called
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_topic_permissions -p my-vhost
+.\" ------------------------------------------------------------------
+.It Cm list_user_permissions Ar username
+.Bl -tag -width Ds
+.It Ar username
+The name of the user for which to list the permissions.
+.El
+.Pp
+Lists user permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all the
+virtual hosts to which the user named
+.Qq janeway
+has been granted access, and the permissions the user has for operations
+on resources in these virtual hosts:
+.sp
+.Dl rabbitmqctl list_user_permissions janeway
+.\" ------------------------------------------------------------------
+.It Cm list_user_topic_permissions Ar username
+.Bl -tag -width Ds
+.It Ar username
+The name of the user for which to list the topic permissions.
+.El
+.Pp
+Lists user topic permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all the
+virtual hosts to which the user named
+.Qq janeway
+has been granted access, and the topic permissions the user has in these virtual hosts:
+.sp
+.Dl rabbitmqctl list_user_topic_permissions janeway
+.\" ------------------------------------------------------------------
+.It Cm list_vhosts Op Ar vhostinfoitem ...
+.Pp
+Lists virtual hosts.
+.Pp
+The
+.Ar vhostinfoitem
+parameter is used to indicate which virtual host information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar vhostinfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm name
+The name of the virtual host with non-ASCII characters escaped as in C.
+.It Cm tracing
+Whether tracing is enabled for this virtual host.
+.El
+.Pp
+If no
+.Ar vhostinfoitem
+are specified then the vhost name is displayed.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all
+virtual hosts, showing each name and whether tracing is enabled:
+.sp
+.Dl rabbitmqctl list_vhosts name tracing
+.\" ------------------------------------------------------------------
+.It Cm set_permissions Oo Fl p Ar vhost Oc Ar user Ar conf Ar write Ar read
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host to which to grant the user access,
+defaulting to
+.Qq / .
+.It Ar user
+The name of the user to grant access to the specified virtual host.
+.It Ar conf
+A regular expression matching resource names for which the user is
+granted configure permissions.
+.It Ar write
+A regular expression matching resource names for which the user is
+granted write permissions.
+.It Ar read
+A regular expression matching resource names for which the user is
+granted read permissions.
+.El
+.Pp
+Sets user permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to grant the
+user named
+.Qq janeway
+access to the virtual host called
+.Qq my-vhost ,
+with configure permissions on all resources whose names start with
+.Qq janeway- ,
+and write and read permissions on all resources:
+.sp
+.Dl rabbitmqctl set_permissions -p my-vhost janeway Qo ^janeway-.* Qc Qo .* Qc Qq .*
+.\" ------------------------------------------------------------------
+.It Cm set_topic_permissions Oo Fl p Ar vhost Oc Ar user Ar exchange Ar write Ar read
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host to which to grant the user access,
+defaulting to
+.Qq / .
+.It Ar user
+The name of the user the permissions apply to in the target virtual host.
+.It Ar exchange
+The name of the topic exchange the authorisation check will be applied to.
+.It Ar write
+A regular expression matching the routing key of the published message.
+.It Ar read
+A regular expression matching the routing key of the consumed message.
+.El
+.Pp
+Sets user topic permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to let the
+user named
+.Qq janeway
+publish and consume messages going through the
+.Qq amq.topic
+exchange of the
+.Qq my-vhost
+virtual host with a routing key starting with
+.Qq janeway- :
+.sp
+.Dl rabbitmqctl set_topic_permissions -p my-vhost janeway amq.topic Qo ^janeway-.* Qc Qo ^janeway-.* Qc
+.Pp
+Topic permissions support variable expansion for the following variables:
+username, vhost, and client_id. Note that client_id is expanded only when using MQTT.
+The previous example could be made more generic by using
+.Qq ^{username}-.* :
+.sp
+.Dl rabbitmqctl set_topic_permissions -p my-vhost janeway amq.topic Qo ^{username}-.* Qc Qo ^{username}-.* Qc
+.El
+.\" ------------------------------------------------------------------
+.\" ## Monitoring and Observability
+.\" ------------------------------------------------------------------
+.Ss Monitoring, observability and health checks
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm environment
+.Pp
+Displays the name and value of each variable in the application
+environment for each running application.
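+.Pp
+For example:
+.sp
+.Dl rabbitmqctl environment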
+.\" ------------------------------------------------------------------
+.It Cm list_bindings Oo Fl p Ar vhost Oc Op Ar bindinginfoitem ...
+.Pp
+Returns binding details.
+By default the bindings for the
+.Qq /
+virtual host are returned.
+The
+.Fl p
+flag can be used to override this default.
+.Pp
+The
+.Ar bindinginfoitem
+parameter is used to indicate which binding information items to include
+in the results.
+The column order in the results will match the order of the parameters.
+.Ar bindinginfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm source_name
+The name of the source of messages to which the binding is attached.
+With non-ASCII characters escaped as in C.
+.It Cm source_kind
+The kind of the source of messages to which the binding is attached.
+Currently always exchange.
+With non-ASCII characters escaped as in C.
+.It Cm destination_name
+The name of the destination of messages to which the binding is
+attached.
+With non-ASCII characters escaped as in C.
+.It Cm destination_kind
+The kind of the destination of messages to which the binding is
+attached.
+With non-ASCII characters escaped as in C.
+.It Cm routing_key
+The binding's routing key, with non-ASCII characters escaped as in C.
+.It Cm arguments
+The binding's arguments.
+.El
+.Pp
+If no
+.Ar bindinginfoitem
+are specified then all above items are displayed.
+.Pp
+For example, this command displays the exchange name and queue name of
+the bindings in the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_bindings -p my-vhost exchange_name queue_name
+.\" ------------------------------------------------------------------
+.It Cm list_channels Op Ar channelinfoitem ...
+.Pp
+Returns information on all current channels, the logical containers
+executing most AMQP commands.
+This includes channels that are part of ordinary AMQP connections, and
+channels created by various plug-ins and other extensions.
+.Pp
+The
+.Ar channelinfoitem
+parameter is used to indicate which channel information items to include
+in the results.
+The column order in the results will match the order of the parameters.
+.Ar channelinfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm pid
+Id of the Erlang process associated with the connection.
+.It Cm connection
+Id of the Erlang process associated with the connection to which the
+channel belongs.
+.It Cm name
+Readable name for the channel.
+.It Cm number
+The number of the channel, which uniquely identifies it within a
+connection.
+.It Cm user
+Username associated with the channel.
+.It Cm vhost
+Virtual host in which the channel operates.
+.It Cm transactional
+True if the channel is in transactional mode, false otherwise.
+.It Cm confirm
+True if the channel is in confirm mode, false otherwise.
+.It Cm consumer_count
+Number of logical AMQP consumers retrieving messages via the channel.
+.It Cm messages_unacknowledged
+Number of messages delivered via this channel but not yet acknowledged.
+.It Cm messages_uncommitted
+Number of messages received in an as yet uncommitted transaction.
+.It Cm acks_uncommitted
+Number of acknowledgements received in an as yet uncommitted transaction.
+.It Cm messages_unconfirmed
+Number of published messages not yet confirmed.
+On channels not in confirm mode, this remains 0.
+.It Cm prefetch_count
+QoS prefetch limit for new consumers, 0 if unlimited.
+.It Cm global_prefetch_count
+QoS prefetch limit for the entire channel, 0 if unlimited.
+.El
+.Pp
+If no
+.Ar channelinfoitem
+are specified then pid, user, consumer_count, and
+messages_unacknowledged are assumed.
+.Pp
+For example, this command displays the connection process and count of
+unacknowledged messages for each channel:
+.sp
+.Dl rabbitmqctl list_channels connection messages_unacknowledged
+.\" ------------------------------------------------------------------
+.It Cm list_ciphers
+.Pp
+Lists cipher suites supported by encoding commands.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all
+cipher suites supported by encoding commands:
+.sp
+.Dl rabbitmqctl list_ciphers
+.\" ------------------------------------------------------------------
+.It Cm list_connections Op Ar connectioninfoitem ...
+.Pp
+Returns TCP/IP connection statistics.
+.Pp
+The
+.Ar connectioninfoitem
+parameter is used to indicate which connection information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar connectioninfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm pid
+Id of the Erlang process associated with the connection.
+.It Cm name
+Readable name for the connection.
+.It Cm port
+Server port.
+.It Cm host
+Server hostname obtained via reverse DNS, or its IP address if reverse
+DNS failed or was disabled.
+.It Cm peer_port
+Peer port.
+.It Cm peer_host
+Peer hostname obtained via reverse DNS, or its IP address if reverse DNS
+failed or was not enabled.
+.It Cm ssl
+Boolean indicating whether the connection is secured with SSL.
+.It Cm ssl_protocol
+SSL protocol (e.g.\&
+.Qq tlsv1 ) .
+.It Cm ssl_key_exchange
+SSL key exchange algorithm (e.g.\&
+.Qq rsa ) .
+.It Cm ssl_cipher
+SSL cipher algorithm (e.g.\&
+.Qq aes_256_cbc ) .
+.It Cm ssl_hash
+SSL hash function (e.g.\&
+.Qq sha ) .
+.It Cm peer_cert_subject
+The subject of the peer's SSL certificate, in RFC4514 form.
+.It Cm peer_cert_issuer
+The issuer of the peer's SSL certificate, in RFC4514 form.
+.It Cm peer_cert_validity
+The period for which the peer's SSL certificate is valid.
+.It Cm state
+Connection state; one of:
+.Bl -bullet -compact
+.It
+starting
+.It
+tuning
+.It
+opening
+.It
+running
+.It
+flow
+.It
+blocking
+.It
+blocked
+.It
+closing
+.It
+closed
+.El
+.It Cm channels
+Number of channels using the connection.
+.It Cm protocol
+Version of the AMQP protocol in use; currently one of:
+.Bl -bullet -compact
+.It
+{0,9,1}
+.It
+{0,8,0}
+.El
+.Pp
+Note that if a client requests an AMQP 0-9 connection, we treat it as
+AMQP 0-9-1.
+.It Cm auth_mechanism
+SASL authentication mechanism used, such as
+.Qq PLAIN .
+.It Cm user
+Username associated with the connection.
+.It Cm vhost
+Virtual host name with non-ASCII characters escaped as in C.
+.It Cm timeout
+Connection timeout / negotiated heartbeat interval, in seconds.
+.It Cm frame_max
+Maximum frame size (bytes).
+.It Cm channel_max
+Maximum number of channels on this connection.
+.It Cm client_properties
+Informational properties transmitted by the client during connection
+establishment.
+.It Cm recv_oct
+Octets received.
+.It Cm recv_cnt
+Packets received.
+.It Cm send_oct
+Octets sent.
+.It Cm send_cnt
+Packets sent.
+.It Cm send_pend
+Send queue size.
+.It Cm connected_at
+Date and time this connection was established, as timestamp.
+.El
+.Pp
+If no
+.Ar connectioninfoitem
+are specified then user, peer host, peer port, time since flow control
+and memory block state are displayed.
+.Pp
+For example, this command displays the send queue size and server port
+for each connection:
+.sp
+.Dl rabbitmqctl list_connections send_pend port
+.\" ------------------------------------------------------------------
+.It Cm list_consumers Op Fl p Ar vhost
+.Pp
+Lists consumers, i.e. subscriptions to a queue's message stream.
+Each line printed shows, separated by tab characters, the name of
+the queue subscribed to, the id of the channel process via which the
+subscription was created and is managed, the consumer tag which uniquely
+identifies the subscription within a channel, a boolean indicating
+whether acknowledgements are expected for messages delivered to this
+consumer, an integer indicating the prefetch limit (with 0 meaning
+.Qq none ) ,
+and any arguments for this consumer.
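+.Pp
+For example, this command lists the consumers in the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_consumers -p my-vhost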
+.\" ------------------------------------------------------------------
+.It Cm list_exchanges Oo Fl p Ar vhost Oc Op Ar exchangeinfoitem ...
+.Pp
+Returns exchange details.
+Exchange details of the
+.Qq /
+virtual host are returned if the
+.Fl p
+flag is absent.
+The
+.Fl p
+flag can be used to override this default.
+.Pp
+The
+.Ar exchangeinfoitem
+parameter is used to indicate which exchange information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar exchangeinfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm name
+The name of the exchange with non-ASCII characters escaped as in C.
+.It Cm type
+The exchange type, such as:
+.Bl -bullet -compact
+.It
+direct
+.It
+topic
+.It
+headers
+.It
+fanout
+.El
+.It Cm durable
+Whether or not the exchange survives server restarts.
+.It Cm auto_delete
+Whether the exchange will be deleted automatically when no longer used.
+.It Cm internal
+Whether the exchange is internal, i.e. cannot be directly published to
+by a client.
+.It Cm arguments
+Exchange arguments.
+.It Cm policy
+Policy name for applying to the exchange.
+.El
+.Pp
+If no
+.Ar exchangeinfoitem
+are specified then exchange name and type are displayed.
+.Pp
+For example, this command displays the name and type for each exchange
+of the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_exchanges -p my-vhost name type
+.\" ------------------------------------------------------------------
+.It Cm list_hashes
+.Pp
+Lists hash functions supported by encoding commands.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all hash
+functions supported by encoding commands:
+.sp
+.Dl rabbitmqctl list_hashes
+.\" ------------------------------------------------------------------
+.It Cm list_queues Oo Fl p Ar vhost Oc Oo Fl -offline | Fl -online | Fl -local Oc Op Ar queueinfoitem ...
+.Pp
+Returns queue details.
+Queue details of the
+.Qq /
+virtual host are returned if the
+.Fl p
+flag is absent.
+The
+.Fl p
+flag can be used to override this default.
+.Pp
+Displayed queues can be filtered by their status or location using one
+of the following mutually exclusive options:
+.Bl -tag -width Ds
+.It Fl -offline
+List only those durable queues that are not currently available (more
+specifically, their master node isn't).
+.It Fl -online
+List queues that are currently available (their master node is).
+.It Fl -local
+List only those queues whose master process is located on the current
+node.
+.El
+.Pp
+The
+.Ar queueinfoitem
+parameter is used to indicate which queue information items to include
+in the results.
+The column order in the results will match the order of the parameters.
+.Ar queueinfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm name
+The name of the queue with non\-ASCII characters escaped as in C.
+.It Cm durable
+Whether or not the queue survives server restarts.
+.It Cm auto_delete
+Whether the queue will be deleted automatically when no longer used.
+.It Cm arguments
+Queue arguments.
+.It Cm policy
+Effective policy name for the queue.
+.It Cm pid
+Erlang process identifier of the queue.
+.It Cm owner_pid
+Id of the Erlang process of the connection which is the
+exclusive owner of the queue.
+Empty if the queue is non-exclusive.
+.It Cm exclusive
+True if queue is exclusive (i.e. has owner_pid), false otherwise.
+.It Cm exclusive_consumer_pid
+Id of the Erlang process representing the channel of the exclusive
+consumer subscribed to this queue.
+Empty if there is no exclusive consumer.
+.It Cm exclusive_consumer_tag
+Consumer tag of the exclusive consumer subscribed to this queue.
+Empty if there is no exclusive consumer.
+.It Cm messages_ready
+Number of messages ready to be delivered to clients.
+.It Cm messages_unacknowledged
+Number of messages delivered to clients but not yet acknowledged.
+.It Cm messages
+Sum of ready and unacknowledged messages (queue depth).
+.It Cm messages_ready_ram
+Number of messages from messages_ready which are resident in ram.
+.It Cm messages_unacknowledged_ram
+Number of messages from messages_unacknowledged which are resident in
+ram.
+.It Cm messages_ram
+Total number of messages which are resident in ram.
+.It Cm messages_persistent
+Total number of persistent messages in the queue (will always be 0 for
+transient queues).
+.It Cm message_bytes
+Sum of the size of all message bodies in the queue.
+This does not include the message properties (including headers) or any
+overhead.
+.It Cm message_bytes_ready
+Like
+.Cm message_bytes
+but counting only those messages ready to be delivered to clients.
+.It Cm message_bytes_unacknowledged
+Like
+.Cm message_bytes
+but counting only those messages delivered to clients but not yet
+acknowledged.
+.It Cm message_bytes_ram
+Like
+.Cm message_bytes
+but counting only those messages which are currently held in RAM.
+.It Cm message_bytes_persistent
+Like
+.Cm message_bytes
+but counting only those messages which are persistent.
+.It Cm head_message_timestamp
+The timestamp property of the first message in the queue, if present.
+Timestamps of messages only appear when they are in the paged-in state.
+.It Cm disk_reads
+Total number of times messages have been read from disk by this queue
+since it started.
+.It Cm disk_writes
+Total number of times messages have been written to disk by this queue
+since it started.
+.It Cm consumers
+Number of consumers.
+.It Cm consumer_utilisation
+Fraction of the time (between 0.0 and 1.0) that the queue is able to
+immediately deliver messages to consumers.
+This can be less than 1.0 if consumers are limited by network congestion
+or prefetch count.
+.It Cm memory
+Bytes of memory allocated by the runtime for the
+queue, including stack, heap and internal structures.
+.It Cm slave_pids
+If the queue is mirrored, this lists the IDs of the mirrors (follower replicas).
+To learn more, see the
+.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Mirroring guide"
+.It Cm synchronised_slave_pids
+If the queue is mirrored, this gives the IDs of the mirrors (follower replicas) which
+are synchronised with the master (leader). To learn more, see the
+.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Mirroring guide"
+.It Cm state
+The state of the queue.
+Normally
+.Qq running ,
+but may be
+.Qq Bro syncing, Ar message_count Brc
+if the queue is synchronising.
+.Pp
+Queues which are located on cluster nodes that are currently down will
+be shown with a status of
+.Qq down
+(and most other
+.Ar queueinfoitem
+will be unavailable).
+.El
+.Pp
+If no
+.Ar queueinfoitem
+are specified then queue name and depth are displayed.
+.Pp
+For example, this command displays the depth and number of consumers for
+each queue of the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_queues -p my-vhost messages consumers
+.\" ------------------------------------------------------------------
+.It Cm list_unresponsive_queues Oo Fl -local Oc Oo Fl -queue-timeout Ar milliseconds Oc Oo Ar column ... Oc Op Fl -no-table-headers
+.Pp
+Tests whether queues respond within the given timeout, and lists those that did not.
+.Pp
+For example, this command lists only those unresponsive queues whose master process
+is located on the current node.
+.sp
+.Dl rabbitmqctl list_unresponsive_queues --local name
+.\" ------------------------------------------------------------------
+.It Cm ping
+.Pp
+Checks that the node OS process is running, is registered with EPMD, and that CLI tools can authenticate with it.
+.Pp
+Example:
+.Dl rabbitmqctl ping -n rabbit@hostname
+.\" ------------------------------------------------------------------
+.It Cm report
+.Pp
+Generate a server status report containing a concatenation of all server
+status information for support purposes.
+The output should be redirected to a file when accompanying a support
+request.
+.Pp
+For example, this command creates a server report which may be attached
+to a support request email:
+.sp
+.Dl rabbitmqctl report > server_report.txt
+.\" ------------------------------------------------------------------
+.It Cm schema_info Oo Fl -no-table-headers Oc Op Ar column ...
+.Pp
+Lists schema database tables and their properties.
+.Pp
+For example, this command lists the table names and their active replicas:
+.sp
+.Dl rabbitmqctl schema_info name active_replicas
+.\" ------------------------------------------------------------------
+.It Cm status
+.Pp
+Displays broker status information such as the running applications on
+the current Erlang node, RabbitMQ and Erlang versions, OS name, memory
+and file descriptor statistics.
+(See the
+.Cm cluster_status
+command to find out which nodes are clustered and running.)
+.Pp
+For example, this command displays information about the RabbitMQ
+broker:
+.sp
+.Dl rabbitmqctl status
+.El
+.\" ------------------------------------------------------------------
+.\" ## Runtime Parameters and Policies
+.\" ------------------------------------------------------------------
+.Ss Runtime Parameters and Policies
+Certain features of RabbitMQ (such as the Federation plugin) are
+controlled by dynamic, cluster-wide
+.Em parameters .
+There are two kinds of parameters: parameters scoped to a virtual host and
+global parameters.
+Each vhost-scoped parameter consists of a component name, a name and a
+value.
+The component name and name are strings, and the value is a valid JSON document.
+A global parameter consists of a name and value.
+The name is a string and the value is an arbitrary Erlang data structure.
+Parameters can be set, cleared and listed.
+In general you should refer to the documentation for the feature in
+question to see how to set parameters.
+.Pp
+Policies are a feature built on top of runtime parameters.
+Policies are used to control and modify the behaviour of queues and
+exchanges on a cluster-wide basis.
+Policies apply within a given vhost, and consist of a name, pattern,
+definition and an optional priority.
+Policies can be set, cleared and listed.
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm clear_global_parameter Ar name
+.Pp
+Clears a global runtime parameter.
+This is similar to
+.Cm clear_parameter
+but the key-value pair isn't tied to a virtual host.
+.Bl -tag -width Ds
+.It Ar name
+The name of the global runtime parameter being cleared.
+.El
+.Pp
+For example, this command clears the global runtime parameter
+.Qq mqtt_default_vhosts :
+.sp
+.Dl rabbitmqctl clear_global_parameter mqtt_default_vhosts
+.\" ------------------------------------------------------------------
+.It Cm clear_parameter Oo Fl p Ar vhost Oc Ar component_name Ar name
+.Pp
+Clears a parameter.
+.Bl -tag -width Ds
+.It Ar component_name
+The name of the component for which the parameter is being cleared.
+.It Ar name
+The name of the parameter being cleared.
+.El
+.Pp
+For example, this command clears the parameter
+.Qq node01
+for the
+.Qq federation-upstream
+component in the default virtual host:
+.sp
+.Dl rabbitmqctl clear_parameter federation-upstream node01
+.\" ------------------------------------------------------------------
+.It Cm list_global_parameters
+.Pp
+Lists all global runtime parameters.
+This is similar to
+.Cm list_parameters
+but the global runtime parameters are not tied to any virtual host.
+.Pp
+For example, this command lists all global parameters:
+.sp
+.Dl rabbitmqctl list_global_parameters
+.\" ------------------------------------------------------------------
+.It Cm list_parameters Op Fl p Ar vhost
+.Pp
+Lists all parameters for a virtual host.
+.Pp
+For example, this command lists all parameters in the default virtual
+host:
+.sp
+.Dl rabbitmqctl list_parameters
+.\" ------------------------------------------------------------------
+.It Cm set_global_parameter Ar name Ar value
+.Pp
+Sets a global runtime parameter.
+This is similar to
+.Cm set_parameter
+but the key-value pair isn't tied to a virtual host.
+.Bl -tag -width Ds
+.It Ar name
+The name of the global runtime parameter being set.
+.It Ar value
+The value for the global runtime parameter, as a JSON term.
+In most shells you are very likely to need to quote this.
+.El
+.Pp
+For example, this command sets the global runtime parameter
+.Qq mqtt_default_vhosts
+to the JSON term {"O=client,CN=guest":"/"}:
+.sp
+.Dl rabbitmqctl set_global_parameter mqtt_default_vhosts '{"O=client,CN=guest":"/"}'
+.\" ------------------------------------------------------------------
+.It Cm set_parameter Oo Fl p Ar vhost Oc Ar component_name Ar name Ar value
+.Pp
+Sets a parameter.
+.Bl -tag -width Ds
+.It Ar component_name
+The name of the component for which the parameter is being set.
+.It Ar name
+The name of the parameter being set.
+.It Ar value
+The value for the parameter, as a JSON term.
+In most shells you are very likely to need to quote this.
+.El
+.Pp
+For example, this command sets the parameter
+.Qq node01
+for the
+.Qq federation-upstream
+component in the default virtual host to the following JSON term:
+.sp
+.Dl rabbitmqctl set_parameter federation-upstream node01 '{"uri":"amqp://user:password@server/%2F","ack-mode":"on-publish"}'
+.\" ------------------------------------------------------------------
+.It Cm list_policies Op Fl p Ar vhost
+.Pp
+Lists all policies for a virtual host.
+.Pp
+For example, this command lists all policies in the default virtual
+host:
+.sp
+.Dl rabbitmqctl list_policies
+.\" ------------------------------------------------------------------
+.It Cm set_operator_policy Oo Fl p Ar vhost Oc Oo Fl -priority Ar priority Oc Oo Fl -apply-to Ar apply-to Oc Ar name Ar pattern Ar definition
+.Pp
+Sets an operator policy that overrides a subset of arguments in user
+policies.
+Arguments are identical to those of
+.Cm set_policy .
+.Pp
+Supported arguments are:
+.Bl -bullet -compact
+.It
+expires
+.It
+message-ttl
+.It
+max-length
+.It
+max-length-bytes
+.El
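+.Pp
+For example, this command sets an operator policy named
+.Qq cap-queues
+(the name and pattern here are illustrative) in the default virtual host,
+capping queues whose names begin with
+.Qq cache
+at 1000 messages:
+.sp
+.Dl rabbitmqctl set_operator_policy cap-queues "^cache" '{"max-length":1000}' --apply-to queues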
+.\" ------------------------------------------------------------------
+.It Cm set_policy Oo Fl p Ar vhost Oc Oo Fl -priority Ar priority Oc Oo Fl -apply-to Ar apply-to Oc Ar name Ar pattern Ar definition
+.Pp
+Sets a policy.
+.Bl -tag -width Ds
+.It Ar name
+The name of the policy.
+.It Ar pattern
+The regular expression which, when it matches a given resource, causes
+the policy to apply.
+.It Ar definition
+The definition of the policy, as a JSON term.
+In most shells you are very likely to need to quote this.
+.It Ar priority
+The priority of the policy as an integer.
+Higher numbers indicate greater precedence.
+The default is 0.
+.It Ar apply-to
+Which types of object this policy should apply to.
+Possible values are:
+.Bl -bullet -compact
+.It
+queues
+.It
+exchanges
+.It
+all
+.El
+The default is
+.Cm all .
+.El
+.Pp
+For example, this command sets the policy
+.Qq federate-me
+in the default virtual host so that built-in exchanges are federated:
+.sp
+.Dl rabbitmqctl set_policy federate-me "^amq." '{"federation-upstream-set":"all"}'
+.\" ------------------------------------------------------------------
+.It Cm clear_policy Oo Fl p Ar vhost Oc Ar name
+.Pp
+Clears a policy.
+.Bl -tag -width Ds
+.It Ar name
+The name of the policy being cleared.
+.El
+.Pp
+For example, this command clears the
+.Qq federate-me
+policy in the default virtual host:
+.sp
+.Dl rabbitmqctl clear_policy federate-me
+.\" ------------------------------------------------------------------
+.It Cm clear_operator_policy Oo Fl p Ar vhost Oc Ar name
+.Pp
+Clears an operator policy.
+Arguments are identical to those of
+.Cm clear_policy .
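+.Pp
+For example, this command clears an operator policy named
+.Qq cap-queues
+in the default virtual host:
+.sp
+.Dl rabbitmqctl clear_operator_policy cap-queues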
+.\" ------------------------------------------------------------------
+.It Cm list_operator_policies Op Fl p Ar vhost
+.Pp
+Lists operator policy overrides for a virtual host.
+Arguments are identical to those of
+.Cm list_policies .
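+.Pp
+For example, this command lists the operator policies in the default virtual host:
+.sp
+.Dl rabbitmqctl list_operator_policies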
+.El
+.\" ------------------------------------------------------------------
+.\" ## Virtual Host Management
+.\" ------------------------------------------------------------------
+.Ss Virtual hosts
+Note that
+.Nm
+manages the RabbitMQ internal user database.
+Permissions for users from any alternative authorisation backend will
+not be visible to
+.Nm .
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm add_vhost Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host entry to create.
+.El
+.Pp
+Creates a virtual host.
+.Pp
+For example, this command instructs the RabbitMQ broker to create a new
+virtual host called
+.Qq test :
+.Pp
+.Dl rabbitmqctl add_vhost test
+.\" ------------------------------------------------------------------
+.It Cm clear_vhost_limits Op Fl p Ar vhost
+.Pp
+Clears virtual host limits.
+.Pp
+For example, this command clears vhost limits in vhost
+.Qq qa_env :
+.sp
+.Dl rabbitmqctl clear_vhost_limits -p qa_env
+.\" ------------------------------------------------------------------
+.It Cm delete_vhost Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host entry to delete.
+.El
+.Pp
+Deletes a virtual host.
+.Pp
+Deleting a virtual host deletes all its exchanges, queues, bindings,
+user permissions, parameters and policies.
+.Pp
+For example, this command instructs the RabbitMQ broker to delete the
+virtual host called
+.Qq test :
+.sp
+.Dl rabbitmqctl delete_vhost test
+.\" ------------------------------------------------------------------
+.It Cm list_vhost_limits Oo Fl p Ar vhost Oc Oo Fl -global Oc Op Fl -no-table-headers
+.Pp
+Displays configured virtual host limits.
+.Bl -tag -width Ds
+.It Fl -global
+Show limits for all vhosts.
+Suppresses the
+.Fl p
+parameter.
+.El
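+.Pp
+For example, this command displays the limits configured for every virtual host:
+.sp
+.Dl rabbitmqctl list_vhost_limits --global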
+.\" ------------------------------------------------------------------
+.It Cm restart_vhost Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host entry to restart.
+.El
+.Pp
+Restarts a failed virtual host's data stores and queues.
+.Pp
+For example, this command instructs the RabbitMQ broker to restart a
+virtual host called
+.Qq test :
+.Pp
+.Dl rabbitmqctl restart_vhost test
+.\" ------------------------------------------------------------------
+.It Cm set_vhost_limits Oo Fl p Ar vhost Oc Ar definition
+.Pp
+Sets virtual host limits.
+.Bl -tag -width Ds
+.It Ar definition
+The definition of the limits, as a JSON term.
+In most shells you are very likely to need to quote this.
+.Pp
+Recognised limits are:
+.Bl -bullet -compact
+.It
+max-connections
+.It
+max-queues
+.El
+.Pp
+Use a negative value to specify "no limit".
+.El
+.Pp
+For example, this command limits the max number of concurrent
+connections in vhost
+.Qq qa_env
+to 64:
+.sp
+.Dl rabbitmqctl set_vhost_limits -p qa_env '{"max-connections": 64}'
+.Pp
+This command limits the max number of queues in vhost
+.Qq qa_env
+to 256:
+.sp
+.Dl rabbitmqctl set_vhost_limits -p qa_env '{"max-queues": 256}'
+.Pp
+This command clears the max number of connections limit in vhost
+.Qq qa_env :
+.sp
+.Dl rabbitmqctl set_vhost_limits -p qa_env '{"max\-connections": \-1}'
+.Pp
+This command disables client connections in vhost
+.Qq qa_env :
+.sp
+.Dl rabbitmqctl set_vhost_limits -p qa_env '{"max-connections": 0}'
+.\" ------------------------------------------------------------------
+.It Cm trace_off Op Fl p Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host for which to stop tracing.
+.El
+.Pp
+Stops tracing.
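+.Pp
+For example, this command stops tracing in the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl trace_off -p my-vhost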
+.\" ------------------------------------------------------------------
+.It Cm trace_on Op Fl p Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host for which to start tracing.
+.El
+.Pp
+Starts tracing.
+Note that the trace state is not persistent; it will revert to being off
+if the node is restarted.
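+.Pp
+For example, this command starts tracing in the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl trace_on -p my-vhost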
+.El
+.\" ------------------------------------------------------------------
+.\" ## Configuration
+.\" ------------------------------------------------------------------
+.Ss Configuration
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm decode Ar value Ar passphrase Oo Fl -cipher Ar cipher Oc Oo Fl -hash Ar hash Oc Op Fl -iterations Ar iterations
+.Bl -tag -width Ds
+.It Ar value Ar passphrase
+Value to decrypt (as produced by the encode command) and passphrase.
+.Pp
+For example:
+.sp
+.Dl rabbitmqctl decode '{encrypted, <<"...">>}' mypassphrase
+.It Fl -cipher Ar cipher Fl -hash Ar hash Fl -iterations Ar iterations
+Options to specify the decryption settings.
+They can be used independently.
+.Pp
+For example:
+.sp
+.Dl rabbitmqctl decode --cipher blowfish_cfb64 --hash sha256 --iterations 10000 '{encrypted,<<"...">>}' mypassphrase
+.El
+.\" ------------------------------------------------------------------
+.It Cm encode Ar value Ar passphrase Oo Fl -cipher Ar cipher Oc Oo Fl -hash Ar hash Oc Op Fl -iterations Ar iterations
+.Bl -tag -width Ds
+.It Ar value Ar passphrase
+Value to encrypt and passphrase.
+.Pp
+For example:
+.sp
+.Dl rabbitmqctl encode '<<"guest">>' mypassphrase
+.It Fl -cipher Ar cipher Fl -hash Ar hash Fl -iterations Ar iterations
+Options to specify the encryption settings.
+They can be used independently.
+.Pp
+For example:
+.sp
+.Dl rabbitmqctl encode --cipher blowfish_cfb64 --hash sha256 --iterations 10000 '<<"guest">>' mypassphrase
+.El
+.\" ------------------------------------------------------------------
+.It Cm set_cluster_name Ar name
+.Pp
+Sets the cluster name to
+.Ar name .
+The cluster name is announced to clients on connection, and used by the
+federation and shovel plugins to record where a message has been.
+The cluster name is by default derived from the hostname of the first
+node in the cluster, but can be changed.
+.Pp
+For example, this sets the cluster name to
+.Qq london :
+.sp
+.Dl rabbitmqctl set_cluster_name london
+.\" ------------------------------------------------------------------
+.It Cm set_disk_free_limit Ar disk_limit
+.Bl -tag -width Ds
+.It Ar disk_limit
+Lower bound limit as an integer in bytes or a string with memory unit symbols
+(see vm_memory_high_watermark), e.g. 512M or 1G.
+Once free disk space reaches the limit, a disk alarm will be set.
+.El
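+.Pp
+For example, this command sets the free disk space limit to 512 MiB:
+.sp
+.Dl rabbitmqctl set_disk_free_limit 512M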
+.\" ------------------------------------------------------------------
+.It Cm set_disk_free_limit mem_relative Ar fraction
+.Bl -tag -width Ds
+.It Ar fraction
+Limit relative to the total amount of available RAM, as a non-negative
+floating point number.
+Values lower than 1.0 can be dangerous and should be used carefully.
+.El
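+.Pp
+For example, this command sets the free disk space limit to 1.5 times the total amount of RAM:
+.sp
+.Dl rabbitmqctl set_disk_free_limit mem_relative 1.5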
+.\" ------------------------------------------------------------------
+.It Cm set_log_level Op Ar log_level
+.Pp
+Sets the log level on the running node.
+.Pp
+Supported
+.Ar log_level
+values are:
+.Bl -bullet -compact
+.It
+debug
+.It
+info
+.It
+warning
+.It
+error
+.It
+none
+.El
+.Pp
+Example:
+.sp
+.Dl rabbitmqctl set_log_level debug
+.\" ------------------------------------------------------------------
+.It Cm set_vm_memory_high_watermark Ar fraction
+.Bl -tag -width Ds
+.It Ar fraction
+The new memory threshold fraction at which flow control is triggered, as
+a floating point number greater than or equal to 0.
+.El
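+.Pp
+For example, this command sets the memory threshold to 40% of the available RAM:
+.sp
+.Dl rabbitmqctl set_vm_memory_high_watermark 0.4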
+.\" ------------------------------------------------------------------
+.It Cm set_vm_memory_high_watermark Oo absolute Oc Ar memory_limit
+.Bl -tag -width Ds
+.It Ar memory_limit
+The new memory limit at which flow control is triggered, expressed in
+bytes as an integer number greater than or equal to 0 or as a string
+with a memory unit symbol (e.g. 512M or 1G).
+Available unit symbols are:
+.Bl -tag -width Ds
+.It Cm k , Cm kiB
+kibibytes (2^10 bytes)
+.It Cm M , Cm MiB
+mebibytes (2^20 bytes)
+.It Cm G , Cm GiB
+gibibytes (2^30 bytes)
+.It Cm kB
+kilobytes (10^3 bytes)
+.It Cm MB
+megabytes (10^6 bytes)
+.It Cm GB
+gigabytes (10^9 bytes)
+.El
+.El
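+.Pp
+For example, this command sets the memory limit to an absolute value of 1 GiB:
+.sp
+.Dl rabbitmqctl set_vm_memory_high_watermark absolute 1G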
+.El
+.\" ------------------------------------------------------------------
+.\" ## Feature Flags
+.\" ------------------------------------------------------------------
+.Ss Feature flags
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm enable_feature_flag Ar feature_flag
+.Pp
+Enables a feature flag on the target node.
+.Pp
+Example:
+.sp
+.Dl rabbitmqctl enable_feature_flag quorum_queue
+.\" ------------------------------------------------------------------
+.It Cm list_feature_flags Op Ar column ...
+.Pp
+Lists feature flags.
+.Pp
+Supported
+.Ar column
+values are:
+.Bl -bullet -compact
+.It
+name
+.It
+state
+.It
+stability
+.It
+provided_by
+.It
+desc
+.It
+doc_url
+.El
+.Pp
+Example:
+.sp
+.Dl rabbitmqctl list_feature_flags name state
+.El
+.\" ------------------------------------------------------------------
+.\" ## Misc Operations
+.\" ------------------------------------------------------------------
+.Ss Connection Operations
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm close_all_connections Oo Fl p Ar vhost Oc Oo Fl -global Oc Oo Fl -per-connection-delay Ar delay Oc Oo Fl -limit Ar limit Oc Ar explanation
+.Bl -tag -width Ds
+.It Fl p Ar vhost
+The name of the virtual host for which connections should be closed.
+Ignored when
+.Fl -global
+is specified.
+.It Fl -global
+Whether connections should be closed for all vhosts.
+Overrides
+.Fl p .
+.It Fl -per-connection-delay Ar delay
+Time in milliseconds to wait after each connection closing.
+.It Fl -limit Ar limit
+Number of connections to close.
+Only works per vhost.
+Ignored when
+.Fl -global
+is specified.
+.It Ar explanation
+Explanation string.
+.El
+.Pp
+Instructs the broker to close all connections for the specified vhost or for the entire RabbitMQ node.
+.Pp
+For example, this command instructs the RabbitMQ broker to close 10 connections on
+.Qq qa_env
+vhost, passing the explanation
+.Qq Please close :
+.sp
+.Dl rabbitmqctl close_all_connections -p qa_env --limit 10 'Please close'
+.Pp
+This command instructs the broker to close all connections to the node:
+.sp
+.Dl rabbitmqctl close_all_connections --global
+.sp
+.\" ------------------------------------------------------------------
+.It Cm close_connection Ar connectionpid Ar explanation
+.Bl -tag -width Ds
+.It Ar connectionpid
+Id of the Erlang process associated with the connection to close.
+.It Ar explanation
+Explanation string.
+.El
+.Pp
+Instructs the broker to close the connection associated with the Erlang
+process id
+.Ar connectionpid
+(see also the
+.Cm list_connections
+command), passing the
+.Ar explanation
+string to the connected client as part of the AMQP connection shutdown
+protocol.
+.Pp
+For example, this command instructs the RabbitMQ broker to close the connection associated with the Erlang process id
+.Qq <rabbit@tanto.4262.0> ,
+passing the explanation
+.Qq go away
+to the connected client:
+.sp
+.Dl rabbitmqctl close_connection Qo <rabbit@tanto.4262.0> Qc Qq go away
+.El
+.\" ------------------------------------------------------------------
+.\" ## Misc
+.\" ------------------------------------------------------------------
+.Ss Misc
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm eval Ar expression
+.Pp
+Evaluates an Erlang expression on the target node.
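+.Pp
+For example, this command returns the name of the node the CLI has connected to:
+.sp
+.Dl rabbitmqctl eval 'node().'
+.El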
+.\" ------------------------------------------------------------------
+.\" ## Queue Operations
+.\" ------------------------------------------------------------------
+.Ss Queue Operations
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm delete_queue Ar queue_name Oo Fl -if-empty | Fl e Oc Op Fl -if-unused | Fl u
+.Bl -tag -width Ds
+.It Ar queue_name
+The name of the queue to delete.
+.It Fl -if-empty
+Deletes the queue only if it is empty (has no messages ready for delivery).
+.It Fl -if-unused
+Deletes the queue only if it has no consumers.
+.El
+.Pp
+Deletes a queue.
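+.Pp
+For example, this command deletes the queue named
+.Qq my-queue ,
+but only if it has no consumers:
+.sp
+.Dl rabbitmqctl delete_queue my-queue --if-unused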
+.\" ------------------------------------------------------------------
+.It Cm purge_queue Oo Fl p Ar vhost Oc Ar queue
+.Bl -tag -width Ds
+.It Ar queue
+The name of the queue to purge.
+.El
+.Pp
+Purges a queue (removes all messages in it).
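+.Pp
+For example, this command purges the queue named
+.Qq my-queue
+in the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl purge_queue -p my-vhost my-queue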
+.El
+.\" ------------------------------------------------------------------------------------------------
+.Sh PLUGIN COMMANDS
+.\" ------------------------------------------------------------------------------------------------
+RabbitMQ plugins can extend the rabbitmqctl tool with new commands when they are enabled.
+Currently available commands can be found in
+.Cm rabbitmqctl help
+output.
+The following commands are added by RabbitMQ plugins available in the default
+distribution:
+.\" ------------------------------------------------------------------
+.\" ## Shovel
+.\" ------------------------------------------------------------------
+.Ss Shovel plugin
+.Bl -tag -width Ds
+.It Cm shovel_status
+Prints a list of configured Shovels.
+.It Cm delete_shovel Oo Fl p Ar vhost Oc Ar name
+Instructs the RabbitMQ node to delete the configured shovel by
+.Ar name .
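+.Pp
+For example, this command deletes a shovel named
+.Qq my-shovel
+in the virtual host
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl delete_shovel -p my-vhost my-shovel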
+.El
+.\" ------------------------------------------------------------------
+.\" ## Federation
+.\" ------------------------------------------------------------------
+.Ss Federation plugin
+.Bl -tag -width Ds
+.It Cm federation_status Op Fl -only-down
+Prints a list of federation links.
+.Bl -tag -width Ds
+.It Fl -only-down
+Only list federation links which are not running.
+.El
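+.Pp
+For example, this command lists only the federation links that are not currently running:
+.sp
+.Dl rabbitmqctl federation_status --only-down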
+.It Cm restart_federation_link Ar link_id
+Instructs the RabbitMQ node to restart the federation link with specified
+.Ar link_id .
+.El
+.\" ------------------------------------------------------------------
+.\" ## AMQP 1.0
+.\" ------------------------------------------------------------------
+.Ss AMQP 1.0 plugin
+.Bl -tag -width Ds
+.It Cm list_amqp10_connections Op Ar amqp10_connectioninfoitem ...
+Similar to the
+.Cm list_connections
+command, but returns fields which make sense for AMQP-1.0 connections.
+.Ar amqp10_connectioninfoitem
+parameter is used to indicate which connection information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar amqp10_connectioninfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm pid
+Id of the Erlang process associated with the connection.
+.It Cm auth_mechanism
+SASL authentication mechanism used, such as
+.Qq PLAIN .
+.It Cm host
+Server hostname obtained via reverse DNS, or its IP address if reverse
+DNS failed or was disabled.
+.It Cm frame_max
+Maximum frame size (bytes).
+.It Cm timeout
+Connection timeout / negotiated heartbeat interval, in seconds.
+.It Cm user
+Username associated with the connection.
+.It Cm state
+Connection state; one of:
+.Bl -bullet -compact
+.It
+starting
+.It
+waiting_amqp0100
+.It
+securing
+.It
+running
+.It
+blocking
+.It
+blocked
+.It
+closing
+.It
+closed
+.El
+.It Cm recv_oct
+Octets received.
+.It Cm recv_cnt
+Packets received.
+.It Cm send_oct
+Octets sent.
+.It Cm send_cnt
+Packets sent.
+.It Cm ssl
+Boolean indicating whether the connection is secured with SSL.
+.It Cm ssl_protocol
+SSL protocol (e.g.\&
+.Qq tlsv1 ) .
+.It Cm ssl_key_exchange
+SSL key exchange algorithm (e.g.\&
+.Qq rsa ) .
+.It Cm ssl_cipher
+SSL cipher algorithm (e.g.\&
+.Qq aes_256_cbc ) .
+.It Cm ssl_hash
+SSL hash function (e.g.\&
+.Qq sha ) .
+.It Cm peer_cert_subject
+The subject of the peer's SSL certificate, in RFC4514 form.
+.It Cm peer_cert_issuer
+The issuer of the peer's SSL certificate, in RFC4514 form.
+.It Cm peer_cert_validity
+The period for which the peer's SSL certificate is valid.
+.It Cm node
+The node name of the RabbitMQ node to which connection is established.
+.El
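+.Pp
+For example, this command lists the username and state of each AMQP 1.0 connection:
+.sp
+.Dl rabbitmqctl list_amqp10_connections user state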
+.El
+.\" ------------------------------------------------------------------
+.\" ## MQTT
+.\" ------------------------------------------------------------------
+.Ss MQTT plugin
+.Bl -tag -width Ds
+.It Cm list_mqtt_connections Op Ar mqtt_connectioninfoitem
+Similar to the
+.Cm list_connections
+command, but returns fields which make sense for MQTT connections.
+.Ar mqtt_connectioninfoitem
+parameter is used to indicate which connection information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar mqtt_connectioninfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm host
+Server hostname obtained via reverse DNS, or its IP address if reverse
+DNS failed or was disabled.
+.It Cm port
+Server port.
+.It Cm peer_host
+Peer hostname obtained via reverse DNS, or its IP address if reverse DNS
+failed or was not enabled.
+.It Cm peer_port
+Peer port.
+.It Cm protocol
+MQTT protocol version, which can be one of the following:
+.Bl -bullet -compact
+.It
+{'MQTT', N/A}
+.It
+{'MQTT', 3.1.0}
+.It
+{'MQTT', 3.1.1}
+.El
+.It Cm channels
+Number of channels using the connection.
+.It Cm channel_max
+Maximum number of channels on this connection.
+.It Cm frame_max
+Maximum frame size (bytes).
+.It Cm client_properties
+Informational properties transmitted by the client during connection
+establishment.
+.It Cm ssl
+Boolean indicating whether the connection is secured with SSL.
+.It Cm ssl_protocol
+SSL protocol (e.g.\&
+.Qq tlsv1 ) .
+.It Cm ssl_key_exchange
+SSL key exchange algorithm (e.g.\&
+.Qq rsa ) .
+.It Cm ssl_cipher
+SSL cipher algorithm (e.g.\&
+.Qq aes_256_cbc ) .
+.It Cm ssl_hash
+SSL hash function (e.g.\&
+.Qq sha ) .
+.It Cm conn_name
+Readable name for the connection.
+.It Cm connection_state
+Connection state; one of:
+.Bl -bullet -compact
+.It
+starting
+.It
+running
+.It
+blocked
+.El
+.It Cm connection
+Id of the Erlang process associated with the internal amqp direct connection.
+.It Cm consumer_tags
+A tuple of consumer tags for QOS0 and QOS1.
+.It Cm message_id
+The last Packet ID sent in a control message.
+.It Cm client_id
+MQTT client identifier for the connection.
+.It Cm clean_sess
+MQTT clean session flag.
+.It Cm will_msg
+MQTT Will message sent in CONNECT frame.
+.It Cm exchange
+Exchange to route MQTT messages configured in rabbitmq_mqtt application environment.
+.It Cm ssl_login_name
+SSL peer certificate authentication name.
+.It Cm retainer_pid
+Id of the Erlang process associated with retain storage for the connection.
+.It Cm user
+Username associated with the connection.
+.It Cm vhost
+Virtual host name with non-ASCII characters escaped as in C.
+.El
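+.Pp
+For example, this command lists the client identifier of each MQTT connection:
+.sp
+.Dl rabbitmqctl list_mqtt_connections client_id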
+.El
+.\" ------------------------------------------------------------------
+.\" ## STOMP
+.\" ------------------------------------------------------------------
+.Ss STOMP plugin
+.Bl -tag -width Ds
+.It Cm list_stomp_connections Op Ar stomp_connectioninfoitem
+Similar to the
+.Cm list_connections
+command, but returns fields which make sense for STOMP connections.
+.Ar stomp_connectioninfoitem
+parameter is used to indicate which connection information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar stomp_connectioninfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm conn_name
+Readable name for the connection.
+.It Cm connection
+Id of the Erlang process associated with the internal amqp direct connection.
+.It Cm connection_state
+Connection state; one of:
+.Bl -bullet -compact
+.It
+running
+.It
+blocking
+.It
+blocked
+.El
+.It Cm session_id
+STOMP protocol session identifier.
+.It Cm channel
+AMQP channel associated with the connection.
+.It Cm version
+Negotiated STOMP protocol version for the connection.
+.It Cm implicit_connect
+Indicates if the connection was established using implicit connect (without a CONNECT frame).
+.It Cm auth_login
+Effective username for the connection.
+.It Cm auth_mechanism
+STOMP authorization mechanism.
+Can be one of:
+.Bl -bullet -compact
+.It
+config
+.It
+ssl
+.It
+stomp_headers
+.El
+.It Cm port
+Server port.
+.It Cm host
+Server hostname obtained via reverse DNS, or its IP address if reverse
+DNS failed or was not enabled.
+.It Cm peer_port
+Peer port.
+.It Cm peer_host
+Peer hostname obtained via reverse DNS, or its IP address if reverse DNS
+failed or was not enabled.
+.It Cm protocol
+STOMP protocol version, which can be one of the following:
+.Bl -bullet -compact
+.It
+{'STOMP', 0}
+.It
+{'STOMP', 1}
+.It
+{'STOMP', 2}
+.El
+.It Cm channels
+Number of channels using the connection.
+.It Cm channel_max
+Maximum number of channels on this connection.
+.It Cm frame_max
+Maximum frame size (bytes).
+.It Cm client_properties
+Informational properties transmitted by the client during connection
+establishment.
+.It Cm ssl
+Boolean indicating whether the connection is secured with SSL.
+.It Cm ssl_protocol
+TLS protocol (e.g.\&
+.Qq tlsv1 ) .
+.It Cm ssl_key_exchange
+TLS key exchange algorithm (e.g.\&
+.Qq rsa ) .
+.It Cm ssl_cipher
+TLS cipher algorithm (e.g.\&
+.Qq aes_256_cbc ) .
+.It Cm ssl_hash
+SSL hash function (e.g.\&
+.Qq sha ) .
+.El
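+.Pp
+For example, this command lists the session identifier of each STOMP connection:
+.sp
+.Dl rabbitmqctl list_stomp_connections session_id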
+.El
+.\" ------------------------------------------------------------------
+.\" ## Management Agent
+.\" ------------------------------------------------------------------
+.Ss Management agent plugin
+.Bl -tag -width Ds
+.It Cm reset_stats_db Op Fl -all
+Resets the management stats database for the RabbitMQ node.
+.Bl -tag -width Ds
+.It Fl -all
+Resets the stats database for all nodes in the cluster.
+.El
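+.Pp
+For example, this command resets the stats database on every node in the cluster:
+.sp
+.Dl rabbitmqctl reset_stats_db --all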
+.El
+.\" ------------------------------------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------------------------------------
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-plugins 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/set_rabbitmq_policy.sh.example b/deps/rabbit/docs/set_rabbitmq_policy.sh.example
new file mode 100644
index 0000000000..f46e901ad5
--- /dev/null
+++ b/deps/rabbit/docs/set_rabbitmq_policy.sh.example
@@ -0,0 +1,4 @@
+# This script is called by rabbitmq-server-ha.ocf during RabbitMQ
+# cluster start-up. It is a convenient place to set your cluster
+# policies, for example:
+# ${OCF_RESKEY_ctl} set_policy ha-all "." '{"ha-mode":"all", "ha-sync-mode":"automatic"}' --apply-to all --priority 0
diff --git a/deps/rabbit/erlang.mk b/deps/rabbit/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbit/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
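+# V=0 (the default) hides recipe commands, V=1 echoes them, V=2 prefixes
+# recipes with "set -x", and V=3 makes the shell itself run with -x.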
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
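+# e.g. $(call comma_list,a b c) expands to "a,b,c".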
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
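+# Running "make erlang-mk" refreshes this file from ERLANG_MK_REPO; set
+# ERLANG_MK_COMMIT to pin the update to a specific revision.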
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
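+# For example, "make ERLANG_OTP=OTP-22.3" builds that release with kerl
+# (if it is not installed yet) and puts it first on PATH for this project.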
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another a key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is Pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = Yet another, but slightly different, Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
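+
+# Usage sketch (illustrative, not part of the build logic): the search target
+# above prints the whole package index, or only the packages whose name or
+# description matches a case-insensitive query.
+#
+#   make search            # list every package
+#   make search q=pool     # e.g. matches poolboy, pooler, riakpool, ...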
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
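+
+# Illustrative only (hypothetical dependency names): entries in
+# DEP_EARLY_PLUGINS are either a bare dependency name, which loads
+# $(DEPS_DIR)/<dep>/early-plugins.mk, or an explicit <dep>/path/to/file.mk
+# relative to $(DEPS_DIR).
+#
+#   DEP_EARLY_PLUGINS = ci_helpers other_dep/mk/early-plugins.mk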
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
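+
+# Resolution sketch with illustrative values: given
+#   DEPS += cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+# query_fetch_method resolves to git, query_repo to the URL above and
+# query_version to 2.9.0; without a dep_* definition the pkg_* index entries
+# defined earlier in this file are used as the fallback.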
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here
+# otherwise it'll be treated both as an apps and a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix, a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they were compiled
+# once. If a developer is working on the top-level project and some
+# dependencies at the same time, he may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
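+#
+# For example (dependency names are illustrative):
+#   make deps FULL=1
+#   make deps FORCE_REBUILD="cowlib ranch"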
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While Makefile file could be GNUmakefile or makefile,
+# in practice only Makefile is needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
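+
+# For instance, a dependency's "include erlang.mk" line ends up as
+# "include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)",
+# so the parent Erlang.mk is reused when ERLANG_MK_FILENAME is set.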
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
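+
+# For reference, a git dependency is typically declared in the project
+# Makefile as follows (repository URL and tag are illustrative only):
+#   DEPS += cowlib
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.11.0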
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
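+
+# The git-subfolder method takes the directory to link as a fourth word,
+# for example (all values illustrative):
+#   dep_myapp = git-subfolder https://example.com/monorepo.git main apps/myapp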
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies only carry a package version. There is no need to look
+# them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
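+
+# A hex dependency only needs a version, plus an optional package name when it
+# differs from the application name, for example (version illustrative):
+#   dep_jsx = hex 2.9.0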
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
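+# The deprecated format gave only a repository and an optional commit, e.g.
+# (URL and tag illustrative): dep_cowlib = https://github.com/ninenines/cowlib 1.0.0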
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
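+# Projects may append their own options in their Makefile, for instance
+# (option illustrative): ERLC_OPTS += +warn_missing_spec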
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
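+
+# For example, with ERLC_OPTS = -Werror +debug_info +warn_export_vars
+# (illustrative), the generated rebar.config would contain:
+#   {erl_opts, [debug_info,warn_export_vars]}.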
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
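+
+# For example, "make bootstrap SP=2" generates the template files indented
+# with two spaces per level instead of tabs.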
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) @echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
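+
+# With these defaults, and when c_src/ has no Makefile of its own, the sources
+# found under c_src/ are built into a shared library at priv/$(PROJECT)
+# with the extension selected below (.so, or .dll on msys2).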
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
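+
+# CONCUERROR_TESTS lists module:function pairs to check, for example
+# (names illustrative): CONCUERROR_TESTS = my_srv_tests:test_race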
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
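+
+# A project Makefile would typically only add extra OTP applications to the
+# PLT, for example (the application list is illustrative):
+# PLT_APPS = crypto public_key ssl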
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
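+
+# With the defaults above, a template such as templates/user_profile.dtl (an
+# illustrative name) is compiled by erlydtl to ebin/user_profile_dtl.beam;
+# DTL_PREFIX and DTL_SUFFIX only change the generated module name.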
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
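+
+# The t variable, handled below, narrows the run; hypothetical examples:
+# make eunit t=my_module         # all EUnit tests of one module
+# make eunit t=my_module:my_test # a single 0-arity test function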
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
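+
+# For example, a project can skip the tarball and change the output directory
+# (values are illustrative):
+# RELX_TAR = 0
+# RELX_OPTS = -o $(CURDIR)/_release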
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
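+
+# For example (dependency and options are illustrative):
+# SHELL_DEPS = tddreloader
+# SHELL_OPTS = -eval 'application:ensure_all_started($(PROJECT), permanent)'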
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
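+
+# For example, to also build man pages into doc/man (values are illustrative):
+# SPHINX_FORMATS = html man
+# sphinx_man_output = doc/man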
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
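+
+# For example, DEP_PLUGINS = cowboy is expected to include that dependency's
+# plugins.mk, while an entry containing a slash (e.g. cowboy/mk/dist.mk)
+# names the plugin file explicitly.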
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
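+
+# For example, to list only the name and version of every dependency:
+# make query-deps QUERY="name version"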
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbit/include/amqqueue.hrl b/deps/rabbit/include/amqqueue.hrl
new file mode 100644
index 0000000000..097f1dfa0c
--- /dev/null
+++ b/deps/rabbit/include/amqqueue.hrl
@@ -0,0 +1,132 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-include("amqqueue_v1.hrl").
+-include("amqqueue_v2.hrl").
+
+-define(is_amqqueue(Q),
+ (?is_amqqueue_v2(Q) orelse
+ ?is_amqqueue_v1(Q))).
+
+-define(amqqueue_is_auto_delete(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_auto_delete(Q) =:= true) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_field_auto_delete(Q) =:= true))).
+
+-define(amqqueue_is_durable(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_durable(Q) =:= true) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_field_durable(Q) =:= true))).
+
+-define(amqqueue_exclusive_owner_is(Q, Owner),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_exclusive_owner(Q) =:= Owner) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_field_exclusive_owner(Q) =:= Owner))).
+
+-define(amqqueue_exclusive_owner_is_pid(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ is_pid(?amqqueue_v2_field_exclusive_owner(Q))) orelse
+ (?is_amqqueue_v1(Q) andalso
+ is_pid(?amqqueue_v1_field_exclusive_owner(Q))))).
+
+-define(amqqueue_state_is(Q, State),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_state(Q) =:= State) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_field_state(Q) =:= State))).
+
+-define(amqqueue_v1_type, rabbit_classic_queue).
+
+-define(amqqueue_is_classic(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_type(Q) =:= rabbit_classic_queue) orelse
+ ?is_amqqueue_v1(Q))).
+
+-define(amqqueue_is_quorum(Q),
+ (?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_type(Q) =:= rabbit_quorum_queue) orelse
+ false).
+
+-define(amqqueue_is_stream(Q),
+ (?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_type(Q) =:= rabbit_stream_queue) orelse
+ false).
+
+-define(amqqueue_has_valid_pid(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ is_pid(?amqqueue_v2_field_pid(Q))) orelse
+ (?is_amqqueue_v1(Q) andalso
+ is_pid(?amqqueue_v1_field_pid(Q))))).
+
+-define(amqqueue_pid_runs_on_local_node(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ node(?amqqueue_v2_field_pid(Q)) =:= node()) orelse
+ (?is_amqqueue_v1(Q) andalso
+ node(?amqqueue_v1_field_pid(Q)) =:= node()))).
+
+-define(amqqueue_pid_equals(Q, Pid),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_pid(Q) =:= Pid) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_field_pid(Q) =:= Pid))).
+
+-define(amqqueue_pids_are_equal(Q0, Q1),
+ ((?is_amqqueue_v2(Q0) andalso ?is_amqqueue_v2(Q1) andalso
+ ?amqqueue_v2_field_pid(Q0) =:= ?amqqueue_v2_field_pid(Q1)) orelse
+ (?is_amqqueue_v1(Q0) andalso ?is_amqqueue_v1(Q1) andalso
+ ?amqqueue_v1_field_pid(Q0) =:= ?amqqueue_v1_field_pid(Q1)))).
+
+-define(amqqueue_field_name(Q),
+ case ?is_amqqueue_v2(Q) of
+ true -> ?amqqueue_v2_field_name(Q);
+ false -> case ?is_amqqueue_v1(Q) of
+ true -> ?amqqueue_v1_field_name(Q)
+ end
+ end).
+
+-define(amqqueue_field_pid(Q),
+ case ?is_amqqueue_v2(Q) of
+ true -> ?amqqueue_v2_field_pid(Q);
+ false -> case ?is_amqqueue_v1(Q) of
+ true -> ?amqqueue_v1_field_pid(Q)
+ end
+ end).
+
+-define(amqqueue_v1_vhost(Q), element(2, ?amqqueue_v1_field_name(Q))).
+-define(amqqueue_v2_vhost(Q), element(2, ?amqqueue_v2_field_name(Q))).
+
+-define(amqqueue_vhost_equals(Q, VHost),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_vhost(Q) =:= VHost) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_vhost(Q) =:= VHost))).
+
+-ifdef(DEBUG_QUORUM_QUEUE_FF).
+-define(enable_quorum_queue_if_debug,
+ begin
+ rabbit_log:info(
+ "---- ENABLING quorum_queue as part of "
+ "?try_mnesia_tx_or_upgrade_amqqueue_and_retry() ----"),
+ ok = rabbit_feature_flags:enable(quorum_queue)
+ end).
+-else.
+-define(enable_quorum_queue_if_debug, noop).
+-endif.
+
+-define(try_mnesia_tx_or_upgrade_amqqueue_and_retry(Expr1, Expr2),
+ try
+ ?enable_quorum_queue_if_debug,
+ Expr1
+ catch
+ throw:{error, {bad_type, T}} when ?is_amqqueue(T) ->
+ Expr2;
+ throw:{aborted, {bad_type, T}} when ?is_amqqueue(T) ->
+ Expr2
+ end).
diff --git a/deps/rabbit/include/amqqueue_v1.hrl b/deps/rabbit/include/amqqueue_v1.hrl
new file mode 100644
index 0000000000..04b2d72850
--- /dev/null
+++ b/deps/rabbit/include/amqqueue_v1.hrl
@@ -0,0 +1,20 @@
+-define(is_amqqueue_v1(Q), is_record(Q, amqqueue, 19)).
+
+-define(amqqueue_v1_field_name(Q), element(2, Q)).
+-define(amqqueue_v1_field_durable(Q), element(3, Q)).
+-define(amqqueue_v1_field_auto_delete(Q), element(4, Q)).
+-define(amqqueue_v1_field_exclusive_owner(Q), element(5, Q)).
+-define(amqqueue_v1_field_arguments(Q), element(6, Q)).
+-define(amqqueue_v1_field_pid(Q), element(7, Q)).
+-define(amqqueue_v1_field_slave_pids(Q), element(8, Q)).
+-define(amqqueue_v1_field_sync_slave_pids(Q), element(9, Q)).
+-define(amqqueue_v1_field_recoverable_slaves(Q), element(10, Q)).
+-define(amqqueue_v1_field_policy(Q), element(11, Q)).
+-define(amqqueue_v1_field_operator_policy(Q), element(12, Q)).
+-define(amqqueue_v1_field_gm_pids(Q), element(13, Q)).
+-define(amqqueue_v1_field_decorators(Q), element(14, Q)).
+-define(amqqueue_v1_field_state(Q), element(15, Q)).
+-define(amqqueue_v1_field_policy_version(Q), element(16, Q)).
+-define(amqqueue_v1_field_slave_pids_pending_shutdown(Q), element(17, Q)).
+-define(amqqueue_v1_field_vhost(Q), element(18, Q)).
+-define(amqqueue_v1_field_options(Q), element(19, Q)).
diff --git a/deps/rabbit/include/amqqueue_v2.hrl b/deps/rabbit/include/amqqueue_v2.hrl
new file mode 100644
index 0000000000..c79a3b7366
--- /dev/null
+++ b/deps/rabbit/include/amqqueue_v2.hrl
@@ -0,0 +1,22 @@
+-define(is_amqqueue_v2(Q), is_record(Q, amqqueue, 21)).
+
+-define(amqqueue_v2_field_name(Q), element(2, Q)).
+-define(amqqueue_v2_field_durable(Q), element(3, Q)).
+-define(amqqueue_v2_field_auto_delete(Q), element(4, Q)).
+-define(amqqueue_v2_field_exclusive_owner(Q), element(5, Q)).
+-define(amqqueue_v2_field_arguments(Q), element(6, Q)).
+-define(amqqueue_v2_field_pid(Q), element(7, Q)).
+-define(amqqueue_v2_field_slave_pids(Q), element(8, Q)).
+-define(amqqueue_v2_field_sync_slave_pids(Q), element(9, Q)).
+-define(amqqueue_v2_field_recoverable_slaves(Q), element(10, Q)).
+-define(amqqueue_v2_field_policy(Q), element(11, Q)).
+-define(amqqueue_v2_field_operator_policy(Q), element(12, Q)).
+-define(amqqueue_v2_field_gm_pids(Q), element(13, Q)).
+-define(amqqueue_v2_field_decorators(Q), element(14, Q)).
+-define(amqqueue_v2_field_state(Q), element(15, Q)).
+-define(amqqueue_v2_field_policy_version(Q), element(16, Q)).
+-define(amqqueue_v2_field_slave_pids_pending_shutdown(Q), element(17, Q)).
+-define(amqqueue_v2_field_vhost(Q), element(18, Q)).
+-define(amqqueue_v2_field_options(Q), element(19, Q)).
+-define(amqqueue_v2_field_type(Q), element(20, Q)).
+-define(amqqueue_v2_field_type_state(Q), element(21, Q)).
diff --git a/deps/rabbit/include/gm_specs.hrl b/deps/rabbit/include/gm_specs.hrl
new file mode 100644
index 0000000000..2a16c862c4
--- /dev/null
+++ b/deps/rabbit/include/gm_specs.hrl
@@ -0,0 +1,15 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-type callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}.
+-type args() :: any().
+-type members() :: [pid()].
+
+-spec joined(args(), members()) -> callback_result().
+-spec members_changed(args(), members(), members()) -> callback_result().
+-spec handle_msg(args(), pid(), any()) -> callback_result().
+-spec handle_terminate(args(), term()) -> any().
diff --git a/deps/rabbit/include/vhost.hrl b/deps/rabbit/include/vhost.hrl
new file mode 100644
index 0000000000..d3abc0dd2a
--- /dev/null
+++ b/deps/rabbit/include/vhost.hrl
@@ -0,0 +1,6 @@
+-include("vhost_v1.hrl").
+-include("vhost_v2.hrl").
+
+-define(is_vhost(V),
+ (?is_vhost_v2(V) orelse
+ ?is_vhost_v1(V))).
diff --git a/deps/rabbit/include/vhost_v1.hrl b/deps/rabbit/include/vhost_v1.hrl
new file mode 100644
index 0000000000..185739c6be
--- /dev/null
+++ b/deps/rabbit/include/vhost_v1.hrl
@@ -0,0 +1,4 @@
+-define(is_vhost_v1(V), is_record(V, vhost, 3)).
+
+-define(vhost_v1_field_name(V), element(2, V)).
+-define(vhost_v1_field_limits(V), element(3, V)).
diff --git a/deps/rabbit/include/vhost_v2.hrl b/deps/rabbit/include/vhost_v2.hrl
new file mode 100644
index 0000000000..9345e8b206
--- /dev/null
+++ b/deps/rabbit/include/vhost_v2.hrl
@@ -0,0 +1,5 @@
+-define(is_vhost_v2(V), is_record(V, vhost, 4)).
+
+-define(vhost_v2_field_name(V), element(2, V)).
+-define(vhost_v2_field_limits(V), element(3, V)).
+-define(vhost_v2_field_metadata(V), element(4, V)).
diff --git a/deps/rabbit/priv/schema/.gitignore b/deps/rabbit/priv/schema/.gitignore
new file mode 100644
index 0000000000..68e5b59a44
--- /dev/null
+++ b/deps/rabbit/priv/schema/.gitignore
@@ -0,0 +1,4 @@
+# plugin schemas are extracted
+# into this directory: this is a Cuttlefish
+# requirement. So we ignore them.
+rabbitmq_*.schema
diff --git a/deps/rabbit/priv/schema/rabbit.schema b/deps/rabbit/priv/schema/rabbit.schema
new file mode 100644
index 0000000000..518403c20d
--- /dev/null
+++ b/deps/rabbit/priv/schema/rabbit.schema
@@ -0,0 +1,1791 @@
+% vim:ft=erlang:
+% ==============================
+% Rabbit app section
+% ==============================
+
+%%
+%% Network Connectivity
+%% ====================
+%%
+
+%% By default, RabbitMQ will listen on all interfaces, using
+%% the standard (reserved) AMQP port.
+%%
+%% {tcp_listeners, [5672]},
+%% To listen on a specific interface, provide a tuple of {IpAddress, Port}.
+%% For example, to listen only on localhost for both IPv4 and IPv6:
+%%
+%% {tcp_listeners, [{"127.0.0.1", 5672},
+%% {"[::1]", 5672}]},
+
+{mapping, "listeners.tcp", "rabbit.tcp_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "listeners.tcp.$name", "rabbit.tcp_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbit.tcp_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("listeners.tcp", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("listeners.tcp", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
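+
+%% In rabbitmq.conf terms the mappings above accept, for example:
+%%
+%% listeners.tcp.default = 5672
+%% listeners.tcp.local   = 127.0.0.1:5672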
+
+%% TLS listeners are configured in the same fashion as TCP listeners,
+%% including the option to control the choice of interface.
+%%
+%% {ssl_listeners, [5671]},
+
+{mapping, "listeners.ssl", "rabbit.ssl_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "listeners.ssl.$name", "rabbit.ssl_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbit.ssl_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("listeners.ssl", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("listeners.ssl", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+%% Number of Erlang processes that will accept connections for the TCP
+%% and TLS listeners.
+%%
+%% {num_tcp_acceptors, 10},
+%% {num_ssl_acceptors, 1},
+
+{mapping, "num_acceptors.ssl", "rabbit.num_ssl_acceptors", [
+ {datatype, integer}
+]}.
+
+{mapping, "num_acceptors.tcp", "rabbit.num_tcp_acceptors", [
+ {datatype, integer}
+]}.
+
+
+{mapping, "socket_writer.gc_threshold", "rabbit.writer_gc_threshold", [
+ {datatype, [{atom, off}, integer]}
+]}.
+
+{translation, "rabbit.writer_gc_threshold",
+ fun(Conf) ->
+ case cuttlefish:conf_get("socket_writer.gc_threshold", Conf, undefined) of
+ %% missing from the config
+ undefined -> cuttlefish:unset();
+ %% explicitly disabled
+ off -> undefined;
+ Int when is_integer(Int) andalso Int > 0 ->
+ Int;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
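+
+%% For example, in rabbitmq.conf:
+%%
+%% socket_writer.gc_threshold = 1000000000
+%% socket_writer.gc_threshold = off       (disables the threshold)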
+
+%% Maximum time for the AMQP 0-9-1 handshake (after socket connection
+%% and TLS handshake), in milliseconds.
+%%
+%% {handshake_timeout, 10000},
+
+{mapping, "handshake_timeout", "rabbit.handshake_timeout", [
+ {datatype, [{atom, infinity}, integer]}
+]}.
+
+%% Set to 'true' to perform reverse DNS lookups when accepting a
+%% connection. Hostnames will then be shown instead of IP addresses
+%% in rabbitmqctl and the management plugin.
+%%
+%% {reverse_dns_lookups, true},
+
+{mapping, "reverse_dns_lookups", "rabbit.reverse_dns_lookups", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "erlang.K", "vm_args.+K", [
+ {default, "true"},
+ {level, advanced}
+]}.
+
+%%
+%% Definition import
+%%
+
+%% Load definitions from a JSON file or directory of files. See
+%% https://www.rabbitmq.com/management.html#load-definitions
+%%
+%% {load_definitions, "/path/to/schema.json"},
+%% {load_definitions, "/path/to/schemas"},
+{mapping, "load_definitions", "rabbit.load_definitions",
+ [{datatype, string},
+ {validators, ["file_accessible"]}]}.
+
+%%
+%% Security / AAA
+%% ==============
+%%
+
+%% The default "guest" user is only permitted to access the server
+%% via a loopback interface (e.g. localhost).
+%% {loopback_users, [<<"guest">>]},
+%%
+%% Uncomment the following line if you want to allow access to the
+%% guest user from anywhere on the network.
+%% {loopback_users, []},
+
+{mapping, "loopback_users", "rabbit.loopback_users", [
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "loopback_users.$user", "rabbit.loopback_users", [
+ {datatype, atom}
+]}.
+
+{translation, "rabbit.loopback_users",
+fun(Conf) ->
+ None = cuttlefish:conf_get("loopback_users", Conf, undefined),
+ case None of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("loopback_users", Conf),
+ [ list_to_binary(U) || {["loopback_users", U], V} <- Settings, V == true ]
+ end
+end}.
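+
+%% For example, to keep the restriction for the default "guest" user only:
+%%
+%% loopback_users.guest = true
+%%
+%% or to lift the loopback restriction entirely:
+%%
+%% loopback_users = none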
+
+%% TLS options.
+%% See https://www.rabbitmq.com/ssl.html for full documentation.
+%%
+%% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"},
+%% {certfile, "/path/to/server/cert.pem"},
+%% {keyfile, "/path/to/server/key.pem"},
+%% {verify, verify_peer},
+%% {fail_if_no_peer_cert, false}]},
+
+{mapping, "ssl_allow_poodle_attack", "rabbit.ssl_allow_poodle_attack",
+[{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options", "rabbit.ssl_options", [
+ {datatype, {enum, [none]}}
+]}.
+
+{translation, "rabbit.ssl_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("ssl_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid ssl_options")
+ end
+end}.
+
+{mapping, "ssl_options.verify", "rabbit.ssl_options.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "ssl_options.fail_if_no_peer_cert", "rabbit.ssl_options.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.cacertfile", "rabbit.ssl_options.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.certfile", "rabbit.ssl_options.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.cacerts.$name", "rabbit.ssl_options.cacerts",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.cacerts",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("ssl_options.cacerts", Conf),
+ [ list_to_binary(V) || {_, V} <- Settings ]
+end}.
+
+{mapping, "ssl_options.cert", "rabbit.ssl_options.cert",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.cert",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("ssl_options.cert", Conf))
+end}.
+
+{mapping, "ssl_options.client_renegotiation", "rabbit.ssl_options.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.crl_check", "rabbit.ssl_options.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "ssl_options.depth", "rabbit.ssl_options.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "ssl_options.dh", "rabbit.ssl_options.dh",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.dh",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("ssl_options.dh", Conf))
+end}.
+
+{mapping, "ssl_options.dhfile", "rabbit.ssl_options.dhfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.honor_cipher_order", "rabbit.ssl_options.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.honor_ecc_order", "rabbit.ssl_options.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.key.RSAPrivateKey", "rabbit.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.key.DSAPrivateKey", "rabbit.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.key.PrivateKeyInfo", "rabbit.ssl_options.key",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.key",
+fun(Conf) ->
+ case cuttlefish_variable:filter_by_prefix("ssl_options.key", Conf) of
+ [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end
+end}.
+
+{mapping, "ssl_options.keyfile", "rabbit.ssl_options.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.log_alert", "rabbit.ssl_options.log_alert",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.password", "rabbit.ssl_options.password",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.psk_identity", "rabbit.ssl_options.psk_identity",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.reuse_sessions", "rabbit.ssl_options.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.secure_renegotiate", "rabbit.ssl_options.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.versions.$version", "rabbit.ssl_options.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbit.ssl_options.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("ssl_options.versions", Conf),
+ [V || {_, V} <- Settings]
+end}.
+
+{mapping, "ssl_options.ciphers.$cipher", "rabbit.ssl_options.ciphers",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.ciphers",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("ssl_options.ciphers", Conf),
+ lists:reverse([V || {_, V} <- Settings])
+end}.
+
+%% ===========================================================================
+
+%% Choose the available SASL mechanism(s) to expose.
+%% The two default (built-in) mechanisms are 'PLAIN' and
+%% 'AMQPLAIN'. Additional mechanisms can be added via
+%% plugins.
+%%
+%% See https://www.rabbitmq.com/authentication.html for more details.
+%%
+%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
+
+{mapping, "auth_mechanisms.$name", "rabbit.auth_mechanisms", [
+ {datatype, atom}]}.
+
+{translation, "rabbit.auth_mechanisms",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf),
+ [ V || {_, V} <- Settings ]
+end}.
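+
+%% For example:
+%%
+%% auth_mechanisms.1 = PLAIN
+%% auth_mechanisms.2 = AMQPLAIN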
+
+
+%% Select an authentication backend to use. RabbitMQ provides an
+%% internal backend in the core.
+%%
+%% {auth_backends, [rabbit_auth_backend_internal]},
+
+{translation, "rabbit.auth_backends",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("auth_backends", Conf),
+ BackendModule = fun
+ (internal) -> rabbit_auth_backend_internal;
+ (ldap) -> rabbit_auth_backend_ldap;
+ (http) -> rabbit_auth_backend_http;
+ (cache) -> rabbit_auth_backend_cache;
+ (amqp) -> rabbit_auth_backend_amqp;
+ (dummy) -> rabbit_auth_backend_dummy;
+ (Other) when is_atom(Other) -> Other;
+ (_) -> cuttlefish:invalid("Unknown/unsupported auth backend")
+ end,
+ AuthBackends = [{Num, {default, BackendModule(V)}} || {["auth_backends", Num], V} <- Settings],
+ AuthNBackends = [{Num, {authn, BackendModule(V)}} || {["auth_backends", Num, "authn"], V} <- Settings],
+ AuthZBackends = [{Num, {authz, BackendModule(V)}} || {["auth_backends", Num, "authz"], V} <- Settings],
+ Backends = lists:foldl(
+ fun({NumStr, {Type, V}}, Acc) ->
+ Num = case catch list_to_integer(NumStr) of
+ N when is_integer(N) -> N;
+ Err ->
+ cuttlefish:invalid(
+ iolist_to_binary(io_lib:format(
+ "Auth backend position in the chain should be an integer ~p", [Err])))
+ end,
+ NewVal = case dict:find(Num, Acc) of
+ {ok, {AuthN, AuthZ}} ->
+ case {Type, AuthN, AuthZ} of
+ {authn, undefined, _} ->
+ {V, AuthZ};
+ {authz, _, undefined} ->
+ {AuthN, V};
+ _ ->
+ cuttlefish:invalid(
+ iolist_to_binary(
+ io_lib:format(
+ "Auth backend already defined for the ~pth ~p backend",
+ [Num, Type])))
+ end;
+ error ->
+ case Type of
+ authn -> {V, undefined};
+ authz -> {undefined, V};
+ default -> {V, V}
+ end
+ end,
+ dict:store(Num, NewVal, Acc)
+ end,
+ dict:new(),
+ AuthBackends ++ AuthNBackends ++ AuthZBackends),
+ lists:map(
+ fun
+ ({Num, {undefined, AuthZ}}) ->
+ cuttlefish:warn(
+ io_lib:format(
+ "Auth backend undefined for the ~pth authz backend. Using ~p",
+ [Num, AuthZ])),
+ {AuthZ, AuthZ};
+ ({Num, {AuthN, undefined}}) ->
+ cuttlefish:warn(
+ io_lib:format(
+ "Authz backend undefined for the ~pth authn backend. Using ~p",
+ [Num, AuthN])),
+ {AuthN, AuthN};
+ ({_Num, {Auth, Auth}}) -> Auth;
+ ({_Num, {AuthN, AuthZ}}) -> {AuthN, AuthZ}
+ end,
+ lists:keysort(1, dict:to_list(Backends)))
+end}.
+
+{mapping, "auth_backends.$num", "rabbit.auth_backends", [
+ {datatype, atom}
+]}.
+
+{mapping, "auth_backends.$num.authn", "rabbit.auth_backends",[
+ {datatype, atom}
+]}.
+
+{mapping, "auth_backends.$num.authz", "rabbit.auth_backends",[
+ {datatype, atom}
+]}.
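+
+%% A new-style sketch of a two-entry chain using the mappings above; the
+%% second position splits authentication and authorisation between two
+%% backends (the backend names are only illustrative):
+%%
+%%   auth_backends.1 = internal
+%%   auth_backends.2.authn = ldap
+%%   auth_backends.2.authz = internal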
+
+%% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and
+%% STOMP ssl_cert_login configurations. See the rabbitmq_stomp
+%% configuration section later in this file and the README in
+%% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
+%% details.
+%%
+%% To use the peer certificate's Common Name (CN) field
+%% instead of its Distinguished Name (DN) for username extraction.
+%%
+%% {ssl_cert_login_from, common_name},
+%%
+%% To use the first SAN value of type DNS:
+%%
+%% {ssl_cert_login_from, subject_alternative_name},
+%% {ssl_cert_login_san_type, dns},
+%% {ssl_cert_login_san_index, 0}
+
+{mapping, "ssl_cert_login_from", "rabbit.ssl_cert_login_from", [
+ {datatype, {enum, [distinguished_name, common_name, subject_alternative_name, subject_alt_name]}}
+]}.
+
+{mapping, "ssl_cert_login_san_type", "rabbit.ssl_cert_login_san_type", [
+ {datatype, {enum, [dns, ip, email, uri, other_name]}}
+]}.
+
+{mapping, "ssl_cert_login_san_index", "rabbit.ssl_cert_login_san_index", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
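+
+%% New-style equivalents of the examples above:
+%%
+%%   ssl_cert_login_from = common_name
+%%
+%% or, to use the first DNS-typed Subject Alternative Name:
+%%
+%%   ssl_cert_login_from = subject_alternative_name
+%%   ssl_cert_login_san_type = dns
+%%   ssl_cert_login_san_index = 0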
+
+%% TLS handshake timeout, in milliseconds.
+%%
+%% {ssl_handshake_timeout, 5000},
+
+{mapping, "ssl_handshake_timeout", "rabbit.ssl_handshake_timeout", [
+ {datatype, integer}
+]}.
+
+%% Cluster name
+
+{mapping, "cluster_name", "rabbit.cluster_name", [
+ {datatype, string}
+]}.
+
+%% Default worker process pool size. Used to limit the maximum concurrency
+%% of certain operations, e.g. queue initialisation and recovery on node boot.
+
+{mapping, "default_worker_pool_size", "rabbit.default_worker_pool_size", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+%% Password hashing implementation. Will only affect newly
+%% created users. To recalculate the hash for an existing user,
+%% it is necessary to update that user's password.
+%%
+%% When importing definitions exported from versions earlier
+%% than 3.6.0, it is possible to go back to MD5 (only do this
+%% as a temporary measure!) by setting this to rabbit_password_hashing_md5.
+%%
+%% To use SHA-512, set to rabbit_password_hashing_sha512.
+%%
+%% {password_hashing_module, rabbit_password_hashing_sha256},
+
+{mapping, "password_hashing_module", "rabbit.password_hashing_module", [
+ {datatype, atom}
+]}.
+
+%% Credential validation.
+%%
+
+{mapping, "credential_validator.validation_backend", "rabbit.credential_validator.validation_backend", [
+ {datatype, atom}
+]}.
+
+{mapping, "credential_validator.min_length", "rabbit.credential_validator.min_length", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+{mapping, "credential_validator.regexp", "rabbit.credential_validator.regexp", [
+ {datatype, string}
+]}.
+
+
+
+%%
+%% Default User / VHost
+%% ====================
+%%
+
+%% On first start RabbitMQ will create a vhost and a user. These
+%% config items control what gets created. See
+%% https://www.rabbitmq.com/access-control.html for further
+%% information about vhosts and access control.
+%%
+%% {default_vhost, <<"/">>},
+%% {default_user, <<"guest">>},
+%% {default_pass, <<"guest">>},
+%% {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
+
+{mapping, "default_vhost", "rabbit.default_vhost", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("default_vhost", Conf))
+end}.
+
+{mapping, "default_user", "rabbit.default_user", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_user",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("default_user", Conf))
+end}.
+
+{mapping, "default_pass", "rabbit.default_pass", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_pass",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("default_pass", Conf))
+end}.
+
+{mapping, "default_permissions.configure", "rabbit.default_permissions", [
+ {datatype, string}
+]}.
+
+{mapping, "default_permissions.read", "rabbit.default_permissions", [
+ {datatype, string}
+]}.
+
+{mapping, "default_permissions.write", "rabbit.default_permissions", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_permissions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("default_permissions", Conf),
+ Configure = proplists:get_value(["default_permissions", "configure"], Settings),
+ Read = proplists:get_value(["default_permissions", "read"], Settings),
+ Write = proplists:get_value(["default_permissions", "write"], Settings),
+ [list_to_binary(Configure), list_to_binary(Read), list_to_binary(Write)]
+end}.
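+
+%% For example, the "full access" defaults shown above map to:
+%%
+%%   default_permissions.configure = .*
+%%   default_permissions.read = .*
+%%   default_permissions.write = .*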
+
+%% Tags for default user
+%%
+%% For more details about tags, see the documentation for the
+%% Management Plugin at https://www.rabbitmq.com/management.html.
+%%
+%% {default_user_tags, [administrator]},
+
+{mapping, "default_user_tags.$tag", "rabbit.default_user_tags",
+ [{datatype, {enum, [true, false]}}]}.
+
+{translation, "rabbit.default_user_tags",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("default_user_tags", Conf),
+ [ list_to_atom(Key) || {[_,Key], Val} <- Settings, Val == true ]
+end}.
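+
+%% For example, to tag the default user as an administrator:
+%%
+%%   default_user_tags.administrator = true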
+
+%%
+%% Additional network and protocol related configuration
+%% =====================================================
+%%
+
+%% Set the default connection heartbeat timeout (in seconds).
+%%
+%% {heartbeat, 600},
+
+{mapping, "heartbeat", "rabbit.heartbeat", [{datatype, integer}]}.
+
+%% Set the max permissible size of an AMQP 0-9-1 frame (in bytes).
+%%
+%% {frame_max, 131072},
+
+{mapping, "frame_max", "rabbit.frame_max", [{datatype, bytesize}]}.
+
+%% Set the max frame size the server will accept before connection
+%% tuning starts.
+%%
+%% {initial_frame_max, 4096},
+
+{mapping, "initial_frame_max", "rabbit.initial_frame_max", [{datatype, bytesize}]}.
+
+%% Set the max permissible number of channels per connection.
+%% 0 means "no limit".
+%%
+%% {channel_max, 0},
+
+{mapping, "channel_max", "rabbit.channel_max", [{datatype, integer}]}.
+
+%% Set the max permissible number of client connections per node.
+%% `infinity` means "no limit".
+%%
+%% {connection_max, infinity},
+
+{mapping, "connection_max", "rabbit.connection_max",
+ [{datatype, [{atom, infinity}, integer]}]}.
+
+{translation, "rabbit.connection_max",
+ fun(Conf) ->
+ case cuttlefish:conf_get("connection_max", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ infinity -> infinity;
+ Val when is_integer(Val) -> Val;
+ _ -> cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
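+
+%% For example (value is illustrative):
+%%
+%%   connection_max = 1024
+%%
+%% or, to make the unlimited default explicit:
+%%
+%%   connection_max = infinity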
+
+
+{mapping, "max_message_size", "rabbit.max_message_size",
+ [{datatype, integer}, {validators, ["less_then_512MB"]}]}.
+
+%% Customising Socket Options.
+%%
+%% See (https://www.erlang.org/doc/man/inet.html#setopts-2) for
+%% further documentation.
+%%
+%% {tcp_listen_options, [{backlog, 128},
+%% {nodelay, true},
+%% {exit_on_close, false}]},
+
+%% TCP listener section ======================================================
+
+{mapping, "tcp_listen_options", "rabbit.tcp_listen_options", [
+ {datatype, {enum, [none]}}]}.
+
+{translation, "rabbit.tcp_listen_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("tcp_listen_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid tcp_listen_options")
+ end
+end}.
+
+{mapping, "tcp_listen_options.backlog", "rabbit.tcp_listen_options.backlog", [
+ {datatype, integer}
+]}.
+
+{mapping, "tcp_listen_options.nodelay", "rabbit.tcp_listen_options.nodelay", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "tcp_listen_options.buffer", "rabbit.tcp_listen_options.buffer",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.delay_send", "rabbit.tcp_listen_options.delay_send",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.dontroute", "rabbit.tcp_listen_options.dontroute",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.exit_on_close", "rabbit.tcp_listen_options.exit_on_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.fd", "rabbit.tcp_listen_options.fd",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.high_msgq_watermark", "rabbit.tcp_listen_options.high_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.high_watermark", "rabbit.tcp_listen_options.high_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.keepalive", "rabbit.tcp_listen_options.keepalive",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.low_msgq_watermark", "rabbit.tcp_listen_options.low_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.low_watermark", "rabbit.tcp_listen_options.low_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.port", "rabbit.tcp_listen_options.port",
+ [{datatype, integer}, {validators, ["port"]}]}.
+
+{mapping, "tcp_listen_options.priority", "rabbit.tcp_listen_options.priority",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.recbuf", "rabbit.tcp_listen_options.recbuf",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.send_timeout", "rabbit.tcp_listen_options.send_timeout",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.send_timeout_close", "rabbit.tcp_listen_options.send_timeout_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.sndbuf", "rabbit.tcp_listen_options.sndbuf",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.tos", "rabbit.tcp_listen_options.tos",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.linger.on", "rabbit.tcp_listen_options.linger",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.linger.timeout", "rabbit.tcp_listen_options.linger",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{translation, "rabbit.tcp_listen_options.linger",
+fun(Conf) ->
+ LingerOn = cuttlefish:conf_get("tcp_listen_options.linger.on", Conf, false),
+ LingerTimeout = cuttlefish:conf_get("tcp_listen_options.linger.timeout", Conf, 0),
+ {LingerOn, LingerTimeout}
+end}.
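+
+%% For example, to enable a 10-second linger on listener sockets
+%% (values are illustrative):
+%%
+%%   tcp_listen_options.linger.on = true
+%%   tcp_listen_options.linger.timeout = 10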
+
+
+%% ==========================================================================
+
+%%
+%% Resource Limits & Flow Control
+%% ==============================
+%%
+%% See https://www.rabbitmq.com/memory.html for full details.
+
+%% Memory-based Flow Control threshold.
+%%
+%% {vm_memory_high_watermark, 0.4},
+
+%% Alternatively, we can set a limit (in bytes) of RAM used by the node.
+%%
+%% {vm_memory_high_watermark, {absolute, 1073741824}},
+%%
+%% Or you can set an absolute value using memory unit symbols (with RabbitMQ 3.6.0+).
+%%
+%% {vm_memory_high_watermark, {absolute, "1024M"}},
+%%
+%% Supported unit symbols:
+%%
+%% k, kiB: kibibytes (2^10 = 1,024 bytes)
+%% M, MiB: mebibytes (2^20 = 1,048,576 bytes)
+%% G, GiB: gibibytes (2^30 = 1,073,741,824 bytes)
+%% kB: kilobytes (10^3 = 1,000 bytes)
+%% MB: megabytes (10^6 = 1,000,000 bytes)
+%% GB: gigabytes (10^9 = 1,000,000,000 bytes)
+
+{mapping, "vm_memory_high_watermark.relative", "rabbit.vm_memory_high_watermark", [
+ {datatype, float}]}.
+
+{mapping, "vm_memory_high_watermark.absolute", "rabbit.vm_memory_high_watermark", [
+ {datatype, [integer, string]}]}.
+
+
+{translation, "rabbit.vm_memory_high_watermark",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("vm_memory_high_watermark", Conf),
+ Absolute = proplists:get_value(["vm_memory_high_watermark", "absolute"], Settings),
+ Relative = proplists:get_value(["vm_memory_high_watermark", "relative"], Settings),
+ case {Absolute, Relative} of
+ {undefined, undefined} -> cuttlefish:invalid("No vm watermark defined");
+ {_, undefined} -> {absolute, Absolute};
+ _ -> Relative
+ end
+end}.
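+
+%% New-style equivalents of the examples above (use one form or the other):
+%%
+%%   vm_memory_high_watermark.relative = 0.4
+%%
+%% or, as an absolute limit:
+%%
+%%   vm_memory_high_watermark.absolute = 1GB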
+
+%% Fraction of the high watermark limit at which queues start to
+%% page messages out to disc in order to free up memory.
+%%
+%% Values greater than 0.9 can be dangerous and should be used carefully.
+%%
+%% {vm_memory_high_watermark_paging_ratio, 0.5},
+
+{mapping, "vm_memory_high_watermark_paging_ratio",
+ "rabbit.vm_memory_high_watermark_paging_ratio",
+ [{datatype, float}, {validators, ["less_than_1"]}]}.
+
+%% Interval (in milliseconds) at which we perform the check of the memory
+%% levels against the watermarks.
+%%
+%% {memory_monitor_interval, 2500},
+
+{mapping, "memory_monitor_interval", "rabbit.memory_monitor_interval",
+ [{datatype, integer}]}.
+
+%% Selects Erlang VM memory consumption calculation strategy.
+%% Can be `allocated`, `rss` or `legacy` (aliased as `erlang`).
+%%
+%% {vm_memory_calculation_strategy, rss},
+
+{mapping, "vm_memory_calculation_strategy", "rabbit.vm_memory_calculation_strategy",
+ [{datatype, {enum, [rss, erlang, allocated, legacy]}}]}.
+
+%% The total memory available can be calculated from the OS resources
+%% (default option) or provided as a configuration parameter.
+{mapping, "total_memory_available_override_value", "rabbit.total_memory_available_override_value", [
+ {datatype, [integer, string]}]}.
+
+%% Set disk free limit (in bytes). Once free disk space reaches this
+%% lower bound, a disk alarm will be set - see the documentation
+%% listed above for more details.
+%%
+%% {disk_free_limit, 50000000},
+%%
+%% Or you can set it using memory units (same as in vm_memory_high_watermark)
+%% with RabbitMQ 3.6.0+.
+%% {disk_free_limit, "50MB"},
+%% {disk_free_limit, "50000kB"},
+%% {disk_free_limit, "2GB"},
+
+%% Alternatively, we can set a limit relative to total available RAM.
+%%
+%% Values lower than 1.0 can be dangerous and should be used carefully.
+%% {disk_free_limit, {mem_relative, 2.0}},
+
+{mapping, "disk_free_limit.relative", "rabbit.disk_free_limit", [
+ {datatype, float}]}.
+
+{mapping, "disk_free_limit.absolute", "rabbit.disk_free_limit", [
+ {datatype, [integer, string]}]}.
+
+
+{translation, "rabbit.disk_free_limit",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("disk_free_limit", Conf),
+ Absolute = proplists:get_value(["disk_free_limit", "absolute"], Settings),
+ Relative = proplists:get_value(["disk_free_limit", "relative"], Settings),
+ case {Absolute, Relative} of
+ {undefined, undefined} -> cuttlefish:invalid("No disk limit defined");
+ {_, undefined} -> Absolute;
+ _ -> {mem_relative, Relative}
+ end
+end}.
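+
+%% New-style equivalents of the examples above (use one form or the other):
+%%
+%%   disk_free_limit.absolute = 2GB
+%%
+%% or, relative to the total amount of RAM:
+%%
+%%   disk_free_limit.relative = 2.0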
+
+%%
+%% Clustering
+%% =====================
+%%
+
+%% How to respond to cluster partitions.
+%% See https://www.rabbitmq.com/partitions.html for further details.
+%%
+%% {cluster_partition_handling, ignore},
+
+{mapping, "cluster_partition_handling", "rabbit.cluster_partition_handling",
+ [{datatype, {enum, [ignore, pause_minority, autoheal, pause_if_all_down]}}]}.
+
+{mapping, "cluster_partition_handling.pause_if_all_down.recover",
+ "rabbit.cluster_partition_handling",
+ [{datatype, {enum, [ignore, autoheal]}}]}.
+
+{mapping, "cluster_partition_handling.pause_if_all_down.nodes.$name",
+ "rabbit.cluster_partition_handling",
+ [{datatype, atom}]}.
+
+{translation, "rabbit.cluster_partition_handling",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_partition_handling", Conf) of
+ pause_if_all_down ->
+ PauseIfAllDownNodes = cuttlefish_variable:filter_by_prefix(
+ "cluster_partition_handling.pause_if_all_down.nodes",
+ Conf),
+ case PauseIfAllDownNodes of
+ [] ->
+ cuttlefish:invalid("Nodes required for pause_if_all_down");
+ _ ->
+ Nodes = [ V || {_K, V} <- PauseIfAllDownNodes ],
+ PauseIfAllDownRecover = cuttlefish:conf_get(
+ "cluster_partition_handling.pause_if_all_down.recover",
+ Conf),
+ case PauseIfAllDownRecover of
+ Recover when Recover == ignore; Recover == autoheal ->
+ {pause_if_all_down, Nodes, Recover};
+ _Invalid ->
+ cuttlefish:invalid("Recover strategy required for pause_if_all_down")
+ end
+ end;
+ Other -> Other
+ end
+end}.
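+
+%% A sketch of a pause_if_all_down setup in the new-style format
+%% (node names are hypothetical):
+%%
+%%   cluster_partition_handling = pause_if_all_down
+%%   cluster_partition_handling.pause_if_all_down.recover = autoheal
+%%   cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@host1
+%%   cluster_partition_handling.pause_if_all_down.nodes.2 = rabbit@host2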
+
+%% Number of delegate processes to use for intra-cluster
+%% communication. On a machine which has a very large number of cores
+%% and is also part of a cluster, you may wish to increase this value.
+%%
+
+{mapping, "delegate_count", "rabbit.delegate_count", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+%% Mirror sync batch size, in messages. Increasing this will speed
+%% up syncing but total batch size in bytes must not exceed 2 GiB.
+%% Available in RabbitMQ 3.6.0 or later.
+%%
+%% {mirroring_sync_batch_size, 4096},
+
+{mapping, "mirroring_sync_batch_size", "rabbit.mirroring_sync_batch_size",
+ [{datatype, bytesize}, {validators, ["size_less_than_2G"]}]}.
+
+%% Peer discovery backend used by cluster formation.
+%%
+
+{mapping, "cluster_formation.peer_discovery_backend", "rabbit.cluster_formation.peer_discovery_backend", [
+ {datatype, atom}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_backend",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.peer_discovery_backend", Conf, rabbit_peer_discovery_classic_config) of
+ classic_config -> rabbit_peer_discovery_classic_config;
+ classic -> rabbit_peer_discovery_classic_config;
+ config -> rabbit_peer_discovery_classic_config;
+ dns -> rabbit_peer_discovery_dns;
+ aws -> rabbit_peer_discovery_aws;
+ consul -> rabbit_peer_discovery_consul;
+ etcd -> rabbit_peer_discovery_etcd;
+ kubernetes -> rabbit_peer_discovery_k8s;
+ k8s -> rabbit_peer_discovery_k8s;
+ Module -> Module
+ end
+end}.
+
+%% Own node type, used by cluster formation.
+%%
+
+{mapping, "cluster_formation.node_type", "rabbit.cluster_formation.node_type", [
+ {datatype, {enum, [disc, disk, ram]}}
+]}.
+
+{translation, "rabbit.cluster_formation.node_type",
+fun(Conf) ->
+ %% if peer discovery backend isn't configured, don't generate
+ %% node type
+ case cuttlefish:conf_get("cluster_formation.peer_discovery_backend", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ _Backend ->
+ case cuttlefish:conf_get("cluster_formation.node_type", Conf) of
+ disc -> disc;
+ %% always cast to `disc`
+ disk -> disc;
+ ram -> ram;
+ _Other -> disc
+ end
+ end
+end}.
+
+%% Cluster formation: Randomized startup delay
+
+{mapping, "cluster_formation.randomized_startup_delay_range.min", "rabbit.cluster_formation.randomized_startup_delay_range",
+ [{datatype, integer}]}.
+{mapping, "cluster_formation.randomized_startup_delay_range.max", "rabbit.cluster_formation.randomized_startup_delay_range",
+ [{datatype, integer}]}.
+
+{translation, "rabbit.cluster_formation.randomized_startup_delay_range",
+fun(Conf) ->
+ Min = cuttlefish:conf_get("cluster_formation.randomized_startup_delay_range.min", Conf, undefined),
+ Max = cuttlefish:conf_get("cluster_formation.randomized_startup_delay_range.max", Conf, undefined),
+
+ case {Min, Max} of
+ {undefined, undefined} ->
+ cuttlefish:unset();
+ {undefined, Max} ->
+ %% fallback default
+ {5, Max};
+ {Min, undefined} ->
+ %% fallback default
+ {Min, 60};
+ {Min, Max} ->
+ {Min, Max}
+ end
+end}.
+
+%% Cluster formation: discovery failure retries
+
+{mapping, "cluster_formation.lock_retry_limit", "rabbit.cluster_formation.lock_retry_limit",
+ [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+ ]}.
+{mapping, "cluster_formation.lock_retry_timeout", "rabbit.cluster_formation.lock_retry_timeout",
+ [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+ ]}.
+
+{mapping, "cluster_formation.discovery_retry_limit", "rabbit.cluster_formation.discovery_retry_limit",
+ [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+ ]}.
+{mapping, "cluster_formation.discovery_retry_interval", "rabbit.cluster_formation.discovery_retry_interval",
+ [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+ ]}.
+
+%% Classic config-driven peer discovery backend.
+%%
+%% Make clustering happen *automatically* at startup - only applied
+%% to nodes that have just been reset or started for the first time.
+%% See https://www.rabbitmq.com/clustering.html#auto-config for
+%% further details.
+%%
+%% {cluster_nodes, {['rabbit@my.host.com'], disc}},
+
+{mapping, "cluster_formation.classic_config.nodes.$node", "rabbit.cluster_nodes",
+ [{datatype, atom}]}.
+
+{translation, "rabbit.cluster_nodes",
+fun(Conf) ->
+ Nodes = [V || {_, V} <- cuttlefish_variable:filter_by_prefix("cluster_formation.classic_config.nodes", Conf)],
+
+ case Nodes of
+ [] -> cuttlefish:unset();
+ Other ->
+ case cuttlefish:conf_get("cluster_formation.node_type", Conf, disc) of
+ disc -> {Other, disc};
+ %% Always cast to `disc`
+ disk -> {Other, disc};
+ ram -> {Other, ram}
+ end
+ end
+end}.
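+
+%% A new-style sketch of the classic config example above
+%% (host names are hypothetical):
+%%
+%%   cluster_formation.peer_discovery_backend = classic_config
+%%   cluster_formation.classic_config.nodes.1 = rabbit@host1
+%%   cluster_formation.classic_config.nodes.2 = rabbit@host2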
+
+%% DNS (A records and reverse lookups)-based peer discovery.
+%%
+
+{mapping, "cluster_formation.dns.hostname", "rabbit.cluster_formation.peer_discovery_dns.hostname",
+ [{datatype, string}]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_dns.hostname",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.dns.hostname", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> list_to_binary(Value)
+ end
+end}.
+
+
+%% Interval (in milliseconds) at which we send keepalive messages
+%% to other cluster members. Note that this is not the same thing
+%% as net_ticktime; missed keepalive messages will not cause nodes
+%% to be considered down.
+%%
+%% {cluster_keepalive_interval, 10000},
+
+{mapping, "cluster_keepalive_interval", "rabbit.cluster_keepalive_interval",
+ [{datatype, integer}]}.
+
+%% Queue master locator
+%%
+
+{mapping, "queue_master_locator", "rabbit.queue_master_locator",
+ [{datatype, string}]}.
+
+{translation, "rabbit.queue_master_locator",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("queue_master_locator", Conf))
+end}.
+
+%%
+%% Statistics Collection
+%% =====================
+%%
+
+%% Set (internal) statistics collection granularity.
+%%
+%% {collect_statistics, none},
+
+{mapping, "collect_statistics", "rabbit.collect_statistics",
+ [{datatype, {enum, [none, coarse, fine]}}]}.
+
+%% Statistics collection interval (in milliseconds). Increasing
+%% this will reduce the load on the management database.
+%%
+%% {collect_statistics_interval, 5000},
+
+{mapping, "collect_statistics_interval", "rabbit.collect_statistics_interval",
+ [{datatype, integer}]}.
+
+%%
+%% Misc/Advanced Options
+%% =====================
+%%
+%% NB: Change these only if you understand what you are doing!
+%%
+
+%% Explicitly enable/disable hipe compilation.
+%%
+%% {hipe_compile, true},
+%%
+%% DEPRECATED: this is a no-op and is kept only to allow old configs.
+
+{mapping, "hipe_compile", "rabbit.hipe_compile",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Timeout used when waiting for Mnesia tables in a cluster to
+%% become available.
+%%
+%% {mnesia_table_loading_retry_timeout, 30000},
+
+{mapping, "mnesia_table_loading_retry_timeout", "rabbit.mnesia_table_loading_retry_timeout",
+ [{datatype, integer}]}.
+
+%% Number of retries when waiting for Mnesia tables during cluster startup.
+%% Note that this setting is not applied to Mnesia upgrades or node deletions.
+%%
+%% {mnesia_table_loading_retry_limit, 10},
+
+{mapping, "mnesia_table_loading_retry_limit", "rabbit.mnesia_table_loading_retry_limit",
+ [{datatype, integer}]}.
+
+{mapping, "message_store_shutdown_timeout", "rabbit.msg_store_shutdown_timeout",
+ [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+ ]}.
+
+%% Size in bytes below which to embed messages in the queue index. See
+%% https://www.rabbitmq.com/persistence-conf.html
+%%
+%% {queue_index_embed_msgs_below, 4096}
+
+{mapping, "queue_index_embed_msgs_below", "rabbit.queue_index_embed_msgs_below",
+ [{datatype, bytesize}]}.
+
+%% Whether or not to enable background GC.
+%%
+%% {background_gc_enabled, true}
+
+{mapping, "background_gc_enabled", "rabbit.background_gc_enabled",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Interval (in milliseconds) at which we run background GC.
+%%
+%% {background_gc_target_interval, 60000}
+
+{mapping, "background_gc_target_interval", "rabbit.background_gc_target_interval",
+ [{datatype, integer}]}.
+
+%% Whether or not to enable proxy protocol support.
+%%
+%% {proxy_protocol, false}
+
+{mapping, "proxy_protocol", "rabbit.proxy_protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Whether to stop the rabbit application if a vhost has
+%% to terminate for any reason.
+
+{mapping, "vhost_restart_strategy", "rabbit.vhost_restart_strategy",
+ [{datatype, {enum, [stop_node, continue, transient, persistent]}}]}.
+
+%% Approximate maximum time a consumer can spend processing a message before
+%% the channel is terminated, in milliseconds. Default is no timeout.
+%%
+%% {consumer_timeout, 10000},
+
+{mapping, "consumer_timeout", "rabbit.consumer_timeout", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+%% Product name & version overrides.
+
+{mapping, "product.name", "rabbit.product_name", [
+ {datatype, string}
+]}.
+{mapping, "product.version", "rabbit.product_version", [
+ {datatype, string}
+]}.
+
+%% Message of the day file.
+%% The content of that file is added to the banners, both logged and
+%% printed.
+
+{mapping, "motd_file", "rabbit.motd_file", [
+ {datatype, string}
+]}.
+
+% ==========================
+% Lager section
+% ==========================
+
+{mapping, "log.dir", "lager.log_root", [
+ {datatype, string},
+ {validators, ["dir_writable"]}]}.
+
+{mapping, "log.console", "rabbit.log.console.enabled", [
+ {datatype, {enum, [true, false]}}
+]}.
+{mapping, "log.console.level", "rabbit.log.console.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+
+{mapping, "log.exchange", "rabbit.log.exchange.enabled", [
+ {datatype, {enum, [true, false]}}
+]}.
+{mapping, "log.exchange.level", "rabbit.log.exchange.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+
+{mapping, "log.syslog", "rabbit.log.syslog.enabled", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "log.syslog.level", "rabbit.log.syslog.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+
+{mapping, "log.syslog.identity", "syslog.app_name", [
+ {datatype, string}
+]}.
+
+{mapping, "log.syslog.facility", "syslog.facility", [
+ {datatype, {enum, [kern, kernel, user, mail, daemon, auth, syslog, lpr,
+ news, uucp, cron, authpriv, ftp, ntp, audit, alert,
+ clock, local0, local1, local2, local3, local4,
+ local5, local6, local7]}}
+]}.
+
+{mapping, "log.syslog.multiline_mode", "syslog.multiline_mode", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "log.syslog.ip", "syslog.dest_host", [
+ {datatype, string},
+ {validators, ["is_ip"]}
+]}.
+
+{mapping, "log.syslog.host", "syslog.dest_host", [
+ {datatype, string}
+]}.
+
+{translation, "syslog.dest_host",
+fun(Conf) ->
+ case cuttlefish:conf_get("log.syslog", Conf) of
+ true ->
+ case cuttlefish:conf_get("log.syslog.ip", Conf, undefined) of
+ undefined ->
+ % If log.syslog.ip is not set, then this must be set
+ cuttlefish:conf_get("log.syslog.host", Conf);
+ IpAddr ->
+ IpAddr
+ end;
+ _ ->
+ cuttlefish:invalid("log.syslog must be set to true to set log.syslog.host or log.syslog.ip")
+ end
+end}.
+
+{mapping, "log.syslog.port", "syslog.dest_port", [
+ {datatype, integer}
+]}.
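+
+%% For example, to ship logs to a remote syslog daemon over UDP
+%% (host and port are illustrative):
+%%
+%%   log.syslog = true
+%%   log.syslog.host = syslog.example.local
+%%   log.syslog.port = 514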
+
+{mapping, "log.syslog.transport", "syslog.protocol", [
+ {datatype, {enum, [udp, tcp, tls, ssl]}}
+]}.
+{mapping, "log.syslog.protocol", "syslog.protocol", [
+ {datatype, {enum, [rfc3164, rfc5424]}}
+]}.
+{mapping, "log.syslog.ssl_options.verify", "syslog.protocol", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "log.syslog.ssl_options.fail_if_no_peer_cert", "syslog.protocol", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.cacertfile", "syslog.protocol",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "log.syslog.ssl_options.certfile", "syslog.protocol",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "log.syslog.ssl_options.cacerts.$name", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.cert", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.client_renegotiation", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.crl_check", "syslog.protocol",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "log.syslog.ssl_options.depth", "syslog.protocol",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "log.syslog.ssl_options.dh", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.dhfile", "syslog.protocol",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "log.syslog.ssl_options.honor_cipher_order", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.honor_ecc_order", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.key.RSAPrivateKey", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.key.DSAPrivateKey", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.key.PrivateKeyInfo", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.keyfile", "syslog.protocol",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "log.syslog.ssl_options.log_alert", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.password", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.psk_identity", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.reuse_sessions", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.secure_renegotiate", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.versions.$version", "syslog.protocol",
+ [{datatype, atom}]}.
+
+{translation, "syslog.protocol",
+fun(Conf) ->
+ ParseSslOptions = fun() ->
+ RawSettings = [
+ {verify, cuttlefish:conf_get("log.syslog.ssl_options.verify", Conf, undefined)},
+ {fail_if_no_peer_cert, cuttlefish:conf_get("log.syslog.ssl_options.fail_if_no_peer_cert", Conf, undefined)},
+ {cacertfile, cuttlefish:conf_get("log.syslog.ssl_options.cacertfile", Conf, undefined)},
+ {certfile, cuttlefish:conf_get("log.syslog.ssl_options.certfile", Conf, undefined)},
+ {cert, cuttlefish:conf_get("log.syslog.ssl_options.cert", Conf, undefined)},
+ {client_renegotiation, cuttlefish:conf_get("log.syslog.ssl_options.client_renegotiation", Conf, undefined)},
+ {crl_check, cuttlefish:conf_get("log.syslog.ssl_options.crl_check", Conf, undefined)},
+ {depth, cuttlefish:conf_get("log.syslog.ssl_options.depth", Conf, undefined)},
+ {dh, cuttlefish:conf_get("log.syslog.ssl_options.dh", Conf, undefined)},
+ {dhfile, cuttlefish:conf_get("log.syslog.ssl_options.dhfile", Conf, undefined)},
+ {honor_cipher_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_cipher_order", Conf, undefined)},
+ {honor_ecc_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_ecc_order", Conf, undefined)},
+
+ {keyfile, cuttlefish:conf_get("log.syslog.ssl_options.keyfile", Conf, undefined)},
+ {log_alert, cuttlefish:conf_get("log.syslog.ssl_options.log_alert", Conf, undefined)},
+ {password, cuttlefish:conf_get("log.syslog.ssl_options.password", Conf, undefined)},
+ {psk_identity, cuttlefish:conf_get("log.syslog.ssl_options.psk_identity", Conf, undefined)},
+ {reuse_sessions, cuttlefish:conf_get("log.syslog.ssl_options.reuse_sessions", Conf, undefined)},
+ {secure_renegotiate, cuttlefish:conf_get("log.syslog.ssl_options.secure_renegotiate", Conf, undefined)}
+ ],
+ DefinedSettings = [{K, V} || {K, V} <- RawSettings, V =/= undefined],
+
+ lists:map(
+ fun({K, Val}) when K == dh; K == cert -> {K, list_to_binary(Val)};
+ ({K, Val}) -> {K, Val}
+ end,
+ DefinedSettings) ++
+ [ {K, V}
+ || {K, V} <-
+ [{cacerts, [ list_to_binary(V) || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.cacerts", Conf)]},
+ {versions, [ V || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.versions", Conf) ]},
+ {key, case cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.key", Conf) of
+ [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end}],
+ V =/= undefined,
+ V =/= []]
+ end,
+
+ Proto = cuttlefish:conf_get("log.syslog.protocol", Conf, undefined),
+ Transport = cuttlefish:conf_get("log.syslog.transport", Conf, udp),
+ case Transport of
+ TLS when TLS == tls; TLS == ssl ->
+ case Proto of
+ rfc3164 ->
+ cuttlefish:invalid("Syslog protocol rfc3164 is not compatible with TLS");
+ _ ->
+ {rfc5424, tls, ParseSslOptions()}
+ end;
+ _ when Transport == udp; Transport == tcp ->
+ case Proto of
+ undefined -> {rfc3164, Transport};
+ _ -> {Proto, Transport}
+ end;
+ _ -> cuttlefish:invalid("Invalid syslog transport ~p~n", [Transport])
+ end
+end}.
+
+{mapping, "log.file", "rabbit.log.file.file", [
+ {datatype, [{enum, [false]}, string]}
+]}.
+{mapping, "log.file.level", "rabbit.log.file.level", [
+ {datatype,
+ {enum, ['=debug', debug,
+ info, '!=info',
+ notice, '<=notice',
+ '<warning', warning,
+ error,
+ critical,
+ alert,
+ emergency,
+ none]}}
+]}.
+{mapping, "log.file.rotation.date", "rabbit.log.file.date", [
+ {datatype, string}
+]}.
+{mapping, "log.file.rotation.size", "rabbit.log.file.size", [
+ {datatype, integer}
+]}.
+{mapping, "log.file.rotation.count", "rabbit.log.file.count", [
+ {datatype, integer}
+]}.
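+
+%% A sketch of file logging with size-based rotation (values are illustrative):
+%%
+%%   log.file = rabbit.log
+%%   log.file.level = info
+%%   log.file.rotation.size = 10485760
+%%   log.file.rotation.count = 5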
+
+%% Log categories
+
+{mapping, "log.connection.level", "rabbit.log.categories.connection.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.connection.file", "rabbit.log.categories.connection.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.channel.level", "rabbit.log.categories.channel.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.channel.file", "rabbit.log.categories.channel.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.mirroring.level", "rabbit.log.categories.mirroring.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.mirroring.file", "rabbit.log.categories.mirroring.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.queue.level", "rabbit.log.categories.queue.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.queue.file", "rabbit.log.categories.queue.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.federation.level", "rabbit.log.categories.federation.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.federation.file", "rabbit.log.categories.federation.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.upgrade.level", "rabbit.log.categories.upgrade.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.upgrade.file", "rabbit.log.categories.upgrade.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.ra.level", "rabbit.log.categories.ra.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.ra.file", "rabbit.log.categories.ra.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.default.level", "rabbit.log.categories.default.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+
+% ==========================
+% Kernel section
+% ==========================
+
+{mapping, "net_ticktime", "kernel.net_ticktime",[
+ {datatype, [integer]},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+% ==========================
+% sysmon_handler section
+% ==========================
+
+%% @doc The threshold at which to warn about the number of processes
+%% that are overly busy. Processes with large heaps or that take a
+%% long time to garbage collect will count toward this threshold.
+{mapping, "sysmon_handler.thresholds.busy_processes", "sysmon_handler.process_limit", [
+ {datatype, integer},
+ hidden
+]}.
+
+{translation, "sysmon_handler.process_limit",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.thresholds.busy_processes", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ Int when is_integer(Int) ->
+ Int;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+%% @doc The threshold at which to warn about the number of ports that
+%% are overly busy. Ports with full input buffers count toward this
+%% threshold.
+{mapping, "sysmon_handler.thresholds.busy_ports", "sysmon_handler.port_limit", [
+ {datatype, integer},
+ hidden
+]}.
+
+{translation, "sysmon_handler.port_limit",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.thresholds.busy_ports", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ Int when is_integer(Int) ->
+ Int;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+%% @doc A process will become busy when it exceeds this amount of time
+%% doing garbage collection.
+%% @see sysmon_handler.thresholds.busy_processes
+{mapping, "sysmon_handler.triggers.process.garbage_collection", "sysmon_handler.gc_ms_limit", [
+ {datatype, [{atom, off},
+ {duration, ms}]},
+ hidden
+]}.
+
+{translation, "sysmon_handler.gc_ms_limit",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.triggers.process.garbage_collection", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ off ->
+ 0;
+ Int when is_integer(Int) ->
+ Int;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+%% @doc A process will become busy when it exceeds this amount of time
+%% during a single process scheduling & execution cycle.
+{mapping, "sysmon_handler.triggers.process.long_scheduled_execution", "sysmon_handler.schedule_ms_limit", [
+ {datatype, [{atom, off},
+ {duration, ms}]},
+ hidden
+]}.
+
+{translation, "sysmon_handler.schedule_ms_limit",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.triggers.process.long_scheduled_execution", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ off ->
+ 0;
+ Int when is_integer(Int) ->
+ Int;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+%% @doc A process will become busy when its heap exceeds this size.
+%% @see sysmon_handler.thresholds.busy_processes
+{mapping, "sysmon_handler.triggers.process.heap_size", "sysmon_handler.heap_word_limit", [
+ {datatype, [{atom, off},
+ bytesize]},
+ hidden
+]}.
+
+{translation, "sysmon_handler.heap_word_limit",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.triggers.process.heap_size", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ off ->
+ 0;
+ Bytes when is_integer(Bytes) ->
+ WordSize = erlang:system_info(wordsize),
+ Bytes div WordSize;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+%% @doc Whether ports with full input buffers will be counted as
+%% busy. Ports can represent open files or network sockets.
+%% @see sysmon_handler.thresholds.busy_ports
+{mapping, "sysmon_handler.triggers.port", "sysmon_handler.busy_port", [
+ {datatype, flag},
+ hidden
+]}.
+
+{translation, "sysmon_handler.busy_port",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.triggers.port", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+%% @doc Whether distribution ports with full input buffers will be
+%% counted as busy. Distribution ports connect Erlang nodes within a
+%% single cluster.
+%% @see sysmon_handler.thresholds.busy_ports
+{mapping, "sysmon_handler.triggers.distribution_port", "sysmon_handler.busy_dist_port", [
+ {datatype, flag},
+ hidden
+]}.
+
+{translation, "sysmon_handler.busy_dist_port",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.triggers.distribution_port", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+%%
+%% Ra
+%%
+
+{mapping, "raft.segment_max_entries", "ra.segment_max_entries", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.segment_max_entries",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.segment_max_entries", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.wal_max_size_bytes", "ra.wal_max_size_bytes", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.wal_max_size_bytes",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.wal_max_size_bytes", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.wal_max_entries", "ra.wal_max_entries", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.wal_max_entries",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.wal_max_entries", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.wal_hibernate_after", "ra.wal_hibernate_after", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.wal_hibernate_after",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.wal_hibernate_after", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.wal_max_batch_size", "ra.wal_max_batch_size", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.wal_max_batch_size",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.wal_max_batch_size", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.snapshot_chunk_size", "ra.snapshot_chunk_size", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.snapshot_chunk_size",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.snapshot_chunk_size", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.data_dir", "ra.data_dir", [
+ {datatype, string}
+]}.
+
+{translation, "ra.data_dir",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.data_dir", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
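+
+%% A sketch of Raft/WAL tuning in the new-style format (values are
+%% illustrative, not recommendations):
+%%
+%%   raft.wal_max_size_bytes = 64000000
+%%   raft.segment_max_entries = 32768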
+
+% ===============================
+% Validators
+% ===============================
+
+{validator, "size_less_than_2G", "Byte size should be less than 2G and greater than 0",
+fun(Size) when is_integer(Size) ->
+ Size > 0 andalso Size < 2147483648
+end}.
+
+{validator, "less_then_512MB", "Max message size should be less than 512MB and gre than 0",
+fun(Size) when is_integer(Size) ->
+ Size > 0 andalso Size < 536870912
+end}.
+
+{validator, "less_than_1", "Float is not between 0 and 1",
+fun(Float) when is_float(Float) ->
+ Float > 0 andalso Float < 1
+end}.
+
+{validator, "port", "Invalid port number",
+fun(Port) when is_integer(Port) ->
+ Port > 0 andalso Port =< 65535
+end}.
+
+{validator, "byte", "Integer must be in the range [0, 255]",
+fun(Int) when is_integer(Int) ->
+ Int >= 0 andalso Int =< 255
+end}.
+
+{validator, "dir_writable", "Cannot create file in dir",
+fun(Dir) ->
+ TestFile = filename:join(Dir, "test_file"),
+ file:delete(TestFile),
+ Res = ok == file:write_file(TestFile, <<"test">>),
+ file:delete(TestFile),
+ Res
+end}.
+
+{validator, "file_accessible", "file doesn't exist or isn't readable",
+fun(File) ->
+ ReadFile = file:read_file_info(File),
+ element(1, ReadFile) == ok
+end}.
+
+{validator, "is_ip", "string is a valid IP address",
+fun(IpStr) ->
+ Res = inet:parse_address(IpStr),
+ element(1, Res) == ok
+end}.
+
+{validator, "non_negative_integer", "number should be greater or equal to zero",
+fun(Int) when is_integer(Int) ->
+ Int >= 0
+end}.
+
+{validator, "non_zero_positive_integer", "number should be greater or equal to one",
+fun(Int) when is_integer(Int) ->
+ Int >= 1
+end}.
diff --git a/deps/rabbit/rabbitmq-components.mk b/deps/rabbit/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbit/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some targets
+# before the inclusion of erlang.mk, leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
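+
+# Worked example (hypothetical origin URL, for illustration only): assuming
+# the current project is "rabbit_common", cloned from
+#     https://git.example.com/team/rabbitmq-common.git
+# and "rabbitmq_codegen" is declared as a git_rmq dependency whose
+# repository is named "rabbitmq-codegen", then
+#     $(call dep_rmq_repo,https://git.example.com/team/rabbitmq-common.git,rabbitmq_codegen)
+# expands to
+#     https://git.example.com/team/rabbitmq-codegen.git
+# because both the project name and the repository name are substituted by
+# the target's equivalents.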
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
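+
+# For illustration: when this file is included from <umbrella>/deps/rabbit,
+# possible_deps_dir_1 resolves to <umbrella>/deps (which is named `deps` and
+# has a rabbitmq-components.mk one level above it), so $(DEPS_DIR) is pointed
+# at the umbrella's own deps directory. possible_deps_dir_2 covers projects
+# nested deeper inside a component, e.g. under its apps/ directory.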
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbit/scripts/rabbitmq-defaults b/deps/rabbit/scripts/rabbitmq-defaults
new file mode 100755
index 0000000000..41d72c7da4
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-defaults
@@ -0,0 +1,18 @@
+#!/bin/sh -e
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+### next line potentially updated in package install steps
+SYS_PREFIX=
+
+CLEAN_BOOT_FILE=start_clean
+SASL_BOOT_FILE=start_sasl
+BOOT_MODULE="rabbit"
+
+if test -z "$CONF_ENV_FILE" && test -z "$RABBITMQ_CONF_ENV_FILE"; then
+ CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
+fi
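+
+# Example (illustrative): with an empty SYS_PREFIX the default above resolves
+# to /etc/rabbitmq/rabbitmq-env.conf; package install steps may rewrite
+# SYS_PREFIX so the path points inside their install prefix.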
diff --git a/deps/rabbit/scripts/rabbitmq-defaults.bat b/deps/rabbit/scripts/rabbitmq-defaults.bat
new file mode 100644
index 0000000000..41b3d2b47c
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-defaults.bat
@@ -0,0 +1,21 @@
+@echo off
+
+set SASL_BOOT_FILE=start_sasl
+set CLEAN_BOOT_FILE=start_clean
+set BOOT_MODULE=rabbit
+
+if "!RABBITMQ_BASE!"=="" (
+ set RABBITMQ_BASE=!APPDATA!\RabbitMQ
+) else (
+ set RABBITMQ_BASE=!RABBITMQ_BASE:"=!
+)
+
+if not exist "!RABBITMQ_BASE!" (
+ mkdir "!RABBITMQ_BASE!"
+)
+
+if "!RABBITMQ_CONF_ENV_FILE!"=="" (
+ if "!CONF_ENV_FILE!"=="" (
+ set CONF_ENV_FILE=!RABBITMQ_BASE!\rabbitmq-env-conf.bat
+ )
+)
diff --git a/deps/rabbit/scripts/rabbitmq-diagnostics b/deps/rabbit/scripts/rabbitmq-diagnostics
new file mode 100755
index 0000000000..7101f3cc9b
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-diagnostics
@@ -0,0 +1,23 @@
+#!/bin/sh
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-diagnostics "$@"
diff --git a/deps/rabbit/scripts/rabbitmq-diagnostics.bat b/deps/rabbit/scripts/rabbitmq-diagnostics.bat
new file mode 100644
index 0000000000..af2982559c
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-diagnostics.bat
@@ -0,0 +1,55 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+REM Scopes the variables to the current batch file
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^
+-kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmq-diagnostics" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-echopid.bat b/deps/rabbit/scripts/rabbitmq-echopid.bat
new file mode 100644
index 0000000000..98080afd1f
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-echopid.bat
@@ -0,0 +1,57 @@
+@echo off
+
+REM Usage: rabbitmq-echopid.bat <rabbitmq_nodename>
+REM
+REM <rabbitmq_nodename> (s)name of the erlang node to connect to (required)
+
+setlocal
+
+set TDP0=%~dp0
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if "%1"=="" goto argfail
+
+:: set timeout vars ::
+set TIMEOUT=10
+set TIMER=1
+
+:: check that wmic exists ::
+set WMIC_PATH=%SYSTEMROOT%\System32\Wbem\wmic.exe
+if not exist "%WMIC_PATH%" (
+ goto fail
+)
+
+:getpid
+for /f "usebackq tokens=* skip=1" %%P IN (`%%WMIC_PATH%% process where "name='erl.exe' and commandline like '%%%RABBITMQ_NAME_TYPE% %1%%'" get processid 2^>nul`) do (
+ set PID=%%P
+ goto echopid
+)
+
+:echopid
+:: check for pid not found ::
+if "%PID%" == "" (
+ PING 127.0.0.1 -n 2 > nul
+ set /a TIMER+=1
+ if %TIMEOUT%==%TIMER% goto fail
+ goto getpid
+)
+
+:: show pid ::
+echo %PID%
+
+:: all done ::
+:ok
+endlocal
+EXIT /B 0
+
+:: argument is required ::
+:argfail
+echo Please provide your RabbitMQ node name as the argument to this script.
+
+:: something went wrong ::
+:fail
+endlocal
+EXIT /B 1
diff --git a/deps/rabbit/scripts/rabbitmq-env b/deps/rabbit/scripts/rabbitmq-env
new file mode 100755
index 0000000000..90702c43bb
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-env
@@ -0,0 +1,190 @@
+#!/bin/sh -e
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+if [ "$RABBITMQ_ENV_LOADED" = 1 ]; then
+ return 0;
+fi
+
+if [ -z "$RABBITMQ_SCRIPTS_DIR" ]; then
+ # We set +e here since since our test for "readlink -f" below needs to
+ # be able to fail.
+ set +e
+ # Determine where this script is really located (if this script is
+ # invoked from another script, this is the location of the caller)
+ SCRIPT_PATH="$0"
+ while [ -h "$SCRIPT_PATH" ] ; do
+ # Determine if readlink -f is supported at all. TODO clean this up.
+ FULL_PATH=`readlink -f $SCRIPT_PATH 2>/dev/null`
+ if [ "$?" != "0" ]; then
+ REL_PATH=`readlink $SCRIPT_PATH`
+ if expr "$REL_PATH" : '/.*' > /dev/null; then
+ SCRIPT_PATH="$REL_PATH"
+ else
+ SCRIPT_PATH="`dirname "$SCRIPT_PATH"`/$REL_PATH"
+ fi
+ else
+ SCRIPT_PATH=$FULL_PATH
+ fi
+ done
+ set -e
+
+ RABBITMQ_SCRIPTS_DIR=`dirname $SCRIPT_PATH`
+fi
+
+_rmq_env_now()
+{
+ date '+%Y-%m-%d %H:%M:%S'
+}
+
+_rmq_env_print()
+{
+ _rmq_env_tmp="$1"
+ _rmq_env_tmp_len="${#_rmq_env_tmp}"
+ shift
+ printf '%s %s %s\n' "$(_rmq_env_now)" "$_rmq_env_tmp" "$1" 1>&2
+ shift
+ _rmq_env_print_line=''
+ _rmq_env_indent="$((_rmq_env_tmp_len + 21))"
+ for _rmq_env_print_line in "$@"
+ do
+ printf "%${_rmq_env_indent}s%s\n" ' ' "$_rmq_env_print_line" 1>&2
+ done
+ unset _rmq_env_print_line
+ unset _rmq_env_indent
+ unset _rmq_env_tmp_len
+ unset _rmq_env_tmp
+}
+
+_rmq_env_perr()
+{
+ _rmq_env_print '[error]' "$@"
+}
+
+_rmq_env_pwarn()
+{
+ _rmq_env_print '[warning]' "$@"
+}
+
+rmq_realpath() {
+ local path=$1
+
+ if [ -d "$path" ]; then
+ cd "$path" && pwd
+ elif [ -f "$path" ]; then
+ cd "$(dirname "$path")" && echo $(pwd)/$(basename "$path")
+ else
+ echo "$path"
+ fi
+}
+
+RABBITMQ_HOME="$(rmq_realpath "${RABBITMQ_SCRIPTS_DIR}/..")"
+ESCRIPT_DIR="${RABBITMQ_HOME}/escript"
+
+## Set defaults
+. ${RABBITMQ_SCRIPTS_DIR}/rabbitmq-defaults
+
+# We save the current value of $RABBITMQ_PID_FILE in case it was set by
+# an init script. If $CONF_ENV_FILE overrides it again, we must ignore
+# it and warn the user.
+saved_RABBITMQ_PID_FILE="$RABBITMQ_PID_FILE"
+
+## Get configuration variables from the configure environment file
+[ "x" = "x$RABBITMQ_CONF_ENV_FILE" ] && RABBITMQ_CONF_ENV_FILE=${CONF_ENV_FILE}
+if [ -f "${RABBITMQ_CONF_ENV_FILE}" ]; then
+ CONF_ENV_FILE_PHASE=rabbitmq-env
+ . ${RABBITMQ_CONF_ENV_FILE} || true
+fi
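+
+# Illustrative example only: the file sourced above is a plain shell snippet,
+# so it typically contains simple variable assignments, e.g. (hypothetical
+# values):
+#     SERVER_ADDITIONAL_ERL_ARGS="+S 2:2"
+#     CTL_DIST_PORT_MIN=45672
+# Variables may be set with or without the RABBITMQ_ prefix; the prefixed
+# form takes precedence, as the defaults below show.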
+
+[ -n "$ERL_EPMD_PORT" ] && export ERL_EPMD_PORT
+[ -n "$ERL_EPMD_ADDRESS" ] && export ERL_EPMD_ADDRESS
+
+DEFAULT_SCHEDULER_BIND_TYPE="db"
+[ -n "$SCHEDULER_BIND_TYPE" ] || SCHEDULER_BIND_TYPE="$DEFAULT_SCHEDULER_BIND_TYPE"
+[ -n "$RABBITMQ_SCHEDULER_BIND_TYPE" ] || RABBITMQ_SCHEDULER_BIND_TYPE="$SCHEDULER_BIND_TYPE"
+
+DEFAULT_DISTRIBUTION_BUFFER_SIZE=128000
+[ -n "$DISTRIBUTION_BUFFER_SIZE" ] || DISTRIBUTION_BUFFER_SIZE="$DEFAULT_DISTRIBUTION_BUFFER_SIZE"
+[ -n "$RABBITMQ_DISTRIBUTION_BUFFER_SIZE" ] || RABBITMQ_DISTRIBUTION_BUFFER_SIZE="$DISTRIBUTION_BUFFER_SIZE"
+
+DEFAULT_MAX_NUMBER_OF_PROCESSES=1048576
+[ -n "$MAX_NUMBER_OF_PROCESSES" ] || MAX_NUMBER_OF_PROCESSES="$DEFAULT_MAX_NUMBER_OF_PROCESSES"
+[ -n "$RABBITMQ_MAX_NUMBER_OF_PROCESSES" ] || RABBITMQ_MAX_NUMBER_OF_PROCESSES="$MAX_NUMBER_OF_PROCESSES"
+
+DEFAULT_MAX_NUMBER_OF_ATOMS=5000000
+[ -n "$MAX_NUMBER_OF_ATOMS" ] || MAX_NUMBER_OF_ATOMS="$DEFAULT_MAX_NUMBER_OF_ATOMS"
+[ -n "$RABBITMQ_MAX_NUMBER_OF_ATOMS" ] || RABBITMQ_MAX_NUMBER_OF_ATOMS="$MAX_NUMBER_OF_ATOMS"
+
+## Common server defaults
+SERVER_ERL_ARGS=" +P $RABBITMQ_MAX_NUMBER_OF_PROCESSES +t $RABBITMQ_MAX_NUMBER_OF_ATOMS +stbt $RABBITMQ_SCHEDULER_BIND_TYPE +zdbbl $RABBITMQ_DISTRIBUTION_BUFFER_SIZE "
+
+##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
+
+[ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS="$CTL_ERL_ARGS"
+[ "x" = "x$RABBITMQ_CTL_DIST_PORT_MIN" ] && RABBITMQ_CTL_DIST_PORT_MIN="$CTL_DIST_PORT_MIN"
+[ "x" = "x$RABBITMQ_CTL_DIST_PORT_MAX" ] && RABBITMQ_CTL_DIST_PORT_MAX="$CTL_DIST_PORT_MAX"
+[ "x" = "x$RABBITMQ_CTL_DIST_PORT_MIN" ] && RABBITMQ_CTL_DIST_PORT_MIN='35672'
+[ "x" = "x$RABBITMQ_CTL_DIST_PORT_MAX" ] && RABBITMQ_CTL_DIST_PORT_MAX="$(($RABBITMQ_CTL_DIST_PORT_MIN + 10))"
+
+[ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
+[ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
+[ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS}
+[ "x" = "x$RABBITMQ_SERVER_CODE_PATH" ] && RABBITMQ_SERVER_CODE_PATH=${SERVER_CODE_PATH}
+[ "x" = "x$RABBITMQ_IGNORE_SIGINT" ] && RABBITMQ_IGNORE_SIGINT="true"
+[ "xtrue" = "x$RABBITMQ_IGNORE_SIGINT" ] && RABBITMQ_IGNORE_SIGINT_FLAG="+B i"
+
+if [ -n "$saved_RABBITMQ_PID_FILE" ] && \
+ [ "$saved_RABBITMQ_PID_FILE" != "$RABBITMQ_PID_FILE" ]
+then
+ _rmq_env_pwarn 'RABBITMQ_PID_FILE was already set by the init script to:' \
+ "$saved_RABBITMQ_PID_FILE" \
+ 'The value set in rabbitmq-env.conf is ignored because it would break the init script.'
+
+ RABBITMQ_PID_FILE="$saved_RABBITMQ_PID_FILE"
+fi
+
+[ "x" = "x$RABBITMQ_BOOT_MODULE" ] && RABBITMQ_BOOT_MODULE=${BOOT_MODULE}
+
+##--- End of overridden <var_name> variables
+
+_rmq_env_set_erl_libs()
+{
+ if [ -n "$ERL_LIBS" ]
+ then
+ export ERL_LIBS="$RABBITMQ_HOME/plugins:$ERL_LIBS"
+ else
+ export ERL_LIBS="$RABBITMQ_HOME/plugins"
+ fi
+}
+
+run_escript()
+{
+ escript_main="${1:?escript_main must be defined}"
+ shift
+ escript="${1:?escript must be defined}"
+ shift
+
+ _rmq_env_set_erl_libs
+
+ # Important: do not quote RABBITMQ_CTL_ERL_ARGS as they must be
+ # word-split
+ # shellcheck disable=SC2086
+ exec erl +B \
+ -boot "$CLEAN_BOOT_FILE" \
+ -noinput -noshell -hidden -smp enable \
+ $RABBITMQ_CTL_ERL_ARGS \
+ -kernel inet_dist_listen_min "$RABBITMQ_CTL_DIST_PORT_MIN" \
+ -kernel inet_dist_listen_max "$RABBITMQ_CTL_DIST_PORT_MAX" \
+ -run escript start \
+ -escript main "$escript_main" \
+ -extra "$escript" "$@"
+}
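+
+# Illustrative usage: the CLI wrappers source this file and then call, e.g.
+#     run_escript rabbitmqctl_escript "${ESCRIPT_DIR}"/rabbitmqctl "$@"
+# which execs the Erlang VM with the rabbitmqctl escript and the caller's
+# arguments.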
+
+RABBITMQ_ENV_LOADED=1
+
+# Since we source this elsewhere, don't accidentally stop execution
+true
diff --git a/deps/rabbit/scripts/rabbitmq-env.bat b/deps/rabbit/scripts/rabbitmq-env.bat
new file mode 100644
index 0000000000..1db57b33c5
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-env.bat
@@ -0,0 +1,173 @@
+@echo off
+
+REM Scopes the variables to the current batch file
+REM setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+REM setlocal enabledelayedexpansion
+
+REM SCRIPT_DIR=`dirname $SCRIPT_PATH`
+REM RABBITMQ_HOME="${SCRIPT_DIR}/.."
+set SCRIPT_DIR=%TDP0%
+set SCRIPT_NAME=%1
+for /f "delims=" %%F in ("%SCRIPT_DIR%..") do set RABBITMQ_HOME=%%~dpF%%~nF%%~xF
+
+if defined ERL_LIBS (
+ set "ERL_LIBS=%RABBITMQ_HOME%\plugins;%ERL_LIBS%"
+) else (
+ set "ERL_LIBS=%RABBITMQ_HOME%\plugins"
+)
+
+REM If ERLANG_HOME is not defined, check if "erl.exe" is available in
+REM the path and use that.
+if not defined ERLANG_HOME (
+ for /f "delims=" %%F in ('powershell.exe -NoLogo -NoProfile -NonInteractive -Command "(Get-Command erl.exe).Definition"') do @set ERL_PATH=%%F
+ if exist "!ERL_PATH!" (
+ for /f "delims=" %%F in ("!ERL_PATH!") do set ERL_DIRNAME=%%~dpF
+ for /f "delims=" %%F in ("!ERL_DIRNAME!\..") do @set ERLANG_HOME=%%~dpF%%~nF%%~xF
+ )
+ set ERL_PATH=
+ set ERL_DIRNAME=
+)
+
+REM ## Set defaults
+call "%SCRIPT_DIR%\rabbitmq-defaults.bat"
+
+if "!RABBITMQ_CONF_ENV_FILE!"=="" (
+ set RABBITMQ_CONF_ENV_FILE=!CONF_ENV_FILE:"=!
+) else (
+ set RABBITMQ_CONF_ENV_FILE=!RABBITMQ_CONF_ENV_FILE:"=!
+)
+
+if exist "!RABBITMQ_CONF_ENV_FILE!" (
+ call "!RABBITMQ_CONF_ENV_FILE!"
+)
+
+rem Bump ETS table limit to 50000
+if "!ERL_MAX_ETS_TABLES!"=="" (
+ set ERL_MAX_ETS_TABLES=50000
+)
+
+rem Default is defined here:
+rem https://github.com/erlang/otp/blob/master/erts/emulator/beam/erl_port.h
+if "!ERL_MAX_PORTS!"=="" (
+ set ERL_MAX_PORTS=65536
+)
+
+set DEFAULT_SCHEDULER_BIND_TYPE=db
+if "!RABBITMQ_SCHEDULER_BIND_TYPE!"=="" (
+ set RABBITMQ_SCHEDULER_BIND_TYPE=!SCHEDULER_BIND_TYPE!
+)
+if "!RABBITMQ_SCHEDULER_BIND_TYPE!"=="" (
+ set RABBITMQ_SCHEDULER_BIND_TYPE=!DEFAULT_SCHEDULER_BIND_TYPE!
+)
+
+set DEFAULT_DISTRIBUTION_BUFFER_SIZE=128000
+if "!RABBITMQ_DISTRIBUTION_BUFFER_SIZE!"=="" (
+ set RABBITMQ_DISTRIBUTION_BUFFER_SIZE=!DISTRIBUTION_BUFFER_SIZE!
+)
+if "!RABBITMQ_DISTRIBUTION_BUFFER_SIZE!"=="" (
+ set RABBITMQ_DISTRIBUTION_BUFFER_SIZE=!DEFAULT_DISTRIBUTION_BUFFER_SIZE!
+)
+
+set DEFAULT_MAX_NUMBER_OF_PROCESSES=1048576
+if "!RABBITMQ_MAX_NUMBER_OF_PROCESSES!"=="" (
+ set RABBITMQ_MAX_NUMBER_OF_PROCESSES=!MAX_NUMBER_OF_PROCESSES!
+)
+if "!RABBITMQ_MAX_NUMBER_OF_PROCESSES!"=="" (
+ set RABBITMQ_MAX_NUMBER_OF_PROCESSES=!DEFAULT_MAX_NUMBER_OF_PROCESSES!
+)
+
+set DEFAULT_MAX_NUMBER_OF_ATOMS=5000000
+if "!RABBITMQ_MAX_NUMBER_OF_ATOMS!"=="" (
+ set RABBITMQ_MAX_NUMBER_OF_ATOMS=!MAX_NUMBER_OF_ATOMS!
+)
+if "!RABBITMQ_MAX_NUMBER_OF_ATOMS!"=="" (
+ set RABBITMQ_MAX_NUMBER_OF_ATOMS=!DEFAULT_MAX_NUMBER_OF_ATOMS!
+)
+
+REM Common server defaults
+set SERVER_ERL_ARGS=+P !RABBITMQ_MAX_NUMBER_OF_PROCESSES! +t !RABBITMQ_MAX_NUMBER_OF_ATOMS! +stbt !RABBITMQ_SCHEDULER_BIND_TYPE! +zdbbl !RABBITMQ_DISTRIBUTION_BUFFER_SIZE!
+
+REM ##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
+
+REM [ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
+if "!RABBITMQ_SERVER_ERL_ARGS!"=="" (
+ set RABBITMQ_SERVER_ERL_ARGS=!SERVER_ERL_ARGS!
+)
+
+REM [ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
+if "!RABBITMQ_SERVER_START_ARGS!"=="" (
+ if not "!SERVER_START_ARGS!"=="" (
+ set RABBITMQ_SERVER_START_ARGS=!SERVER_START_ARGS!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS}
+if "!RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS!"=="" (
+ if not "!SERVER_ADDITIONAL_ERL_ARGS!"=="" (
+ set RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=!SERVER_ADDITIONAL_ERL_ARGS!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_BOOT_MODULE" ] && RABBITMQ_BOOT_MODULE=${BOOT_MODULE}
+if "!RABBITMQ_BOOT_MODULE!"=="" (
+ if "!BOOT_MODULE!"=="" (
+ set RABBITMQ_BOOT_MODULE=rabbit
+ ) else (
+ set RABBITMQ_BOOT_MODULE=!BOOT_MODULE!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS=${CTL_ERL_ARGS}
+if "!RABBITMQ_CTL_ERL_ARGS!"=="" (
+ if not "!CTL_ERL_ARGS!"=="" (
+ set RABBITMQ_CTL_ERL_ARGS=!CTL_ERL_ARGS!
+ )
+)
+
+if "!RABBITMQ_CTL_DIST_PORT_MIN!"=="" (
+ if not "!CTL_DIST_PORT_MIN!"=="" (
+ set RABBITMQ_CTL_DIST_PORT_MIN=!CTL_DIST_PORT_MIN!
+ )
+)
+if "!RABBITMQ_CTL_DIST_PORT_MAX!"=="" (
+ if not "!CTL_DIST_PORT_MAX!"=="" (
+ set RABBITMQ_CTL_DIST_PORT_MAX=!CTL_DIST_PORT_MAX!
+ )
+)
+if "!RABBITMQ_CTL_DIST_PORT_MIN!"=="" (
+ set RABBITMQ_CTL_DIST_PORT_MIN=35672
+)
+if "!RABBITMQ_CTL_DIST_PORT_MAX!"=="" (
+ set /a RABBITMQ_CTL_DIST_PORT_MAX=10+!RABBITMQ_CTL_DIST_PORT_MIN!
+)
+
+REM ADDITIONAL WINDOWS ONLY CONFIG ITEMS
+
+if "!RABBITMQ_SERVICENAME!"=="" (
+ if "!SERVICENAME!"=="" (
+ set RABBITMQ_SERVICENAME=RabbitMQ
+ ) else (
+ set RABBITMQ_SERVICENAME=!SERVICENAME!
+ )
+)
+
+REM Environment cleanup
+set BOOT_MODULE=
+set CONFIG_FILE=
+set FEATURE_FLAGS_FILE=
+set ENABLED_PLUGINS_FILE=
+set LOG_BASE=
+set MNESIA_BASE=
+set PLUGINS_DIR=
+set SCRIPT_DIR=
+set SCRIPT_NAME=
+set TDP0=
+
+REM ##--- End of overridden <var_name> variables
+
+REM # Since we source this elsewhere, don't accidentally stop execution
+REM true
diff --git a/deps/rabbit/scripts/rabbitmq-plugins b/deps/rabbit/scripts/rabbitmq-plugins
new file mode 100755
index 0000000000..1ec15b2ee9
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-plugins
@@ -0,0 +1,23 @@
+#!/bin/sh
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-plugins "$@"
diff --git a/deps/rabbit/scripts/rabbitmq-plugins.bat b/deps/rabbit/scripts/rabbitmq-plugins.bat
new file mode 100644
index 0000000000..e1f13b7073
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-plugins.bat
@@ -0,0 +1,56 @@
+@echo off
+
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "!TDP0!\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^
+-kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmq-plugins" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-queues b/deps/rabbit/scripts/rabbitmq-queues
new file mode 100755
index 0000000000..680076f962
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-queues
@@ -0,0 +1,23 @@
+#!/bin/sh
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-queues "$@"
diff --git a/deps/rabbit/scripts/rabbitmq-queues.bat b/deps/rabbit/scripts/rabbitmq-queues.bat
new file mode 100644
index 0000000000..99fce6479f
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-queues.bat
@@ -0,0 +1,56 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+REM Scopes the variables to the current batch file
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^
+-kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmq-queues" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-rel b/deps/rabbit/scripts/rabbitmq-rel
new file mode 100755
index 0000000000..a96ec78764
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-rel
@@ -0,0 +1,58 @@
+#!/usr/bin/env escript
+%% vim:ft=erlang:sw=2:et:
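+%%
+%% Illustrative usage (as invoked by the rabbitmq-server script once
+%% ERL_LIBS points at the plugins directory):
+%%   rabbitmq-rel show-rel    print the computed release tuple
+%%   rabbitmq-rel gen-boot    write rabbit.rel and generate the boot script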
+
+main(["show-rel"]) ->
+ Rel = get_rel(),
+ io:format("~p.~n", [Rel]);
+main(["gen-boot"]) ->
+ generate_rel(),
+ generate_boot().
+
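+%% The sort below orders the applications so that rabbitmq_prelaunch sorts
+%% first and mnesia sorts last; everything else keeps the default atom
+%% ordering.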
+get_rel() ->
+ ok = application:load(rabbit),
+ Apps0 = get_apps(rabbit),
+ Apps1 = lists:sort(
+ fun
+ (_, rabbitmq_prelaunch) -> false;
+ (rabbitmq_prelaunch, _) -> true;
+ (_, mnesia) -> true;
+ (mnesia, _) -> false;
+ (A, B) -> A =< B
+ end, Apps0),
+ Apps = [{App, get_vsn(App)} || App <- Apps1],
+
+ ERTSVersion = erlang:system_info(version),
+ RabbitVersion = get_vsn(rabbit),
+
+ {release,
+ {"RabbitMQ", RabbitVersion},
+ {erts, ERTSVersion},
+ Apps}.
+
+get_apps(App) ->
+ ok = load_app(App),
+ {ok, DirectDeps} = application:get_key(App, applications),
+ lists:umerge(
+ [lists:usort(get_apps(Dep)) || Dep <- DirectDeps] ++
+ [lists:usort([kernel, stdlib, sasl, App, mnesia])]).
+
+load_app(App) ->
+ case application:load(App) of
+ ok -> ok;
+ {error, {already_loaded, App}} -> ok
+ end.
+
+generate_rel() ->
+ Rel = get_rel(),
+ io:format("~p.~n", [Rel]),
+ Output = io_lib:format("~p.~n", [Rel]),
+ ok = file:write_file("rabbit.rel", Output).
+
+generate_boot() ->
+ Options = [local, {path, code:get_path()}],
+ ok = systools:make_script("rabbit", Options).
+
+get_vsn(App) ->
+ load_app(App),
+ {ok, Vsn} = application:get_key(App, vsn),
+ Vsn.
diff --git a/deps/rabbit/scripts/rabbitmq-server b/deps/rabbit/scripts/rabbitmq-server
new file mode 100755
index 0000000000..82058dcb26
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-server
@@ -0,0 +1,155 @@
+#!/bin/sh
+# vim:sw=4:et:
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+set -e
+
+# Get default settings with user overrides for (RABBITMQ_)<var_name>
+# Non-empty defaults should be set in rabbitmq-env
+SCRIPTS_DIR=$(dirname "$0")
+. "$SCRIPTS_DIR/rabbitmq-env"
+
+[ "$NOTIFY_SOCKET" ] && RUNNING_UNDER_SYSTEMD=true
+
+RABBITMQ_DEFAULT_ALLOC_ARGS="+MBas ageffcbf +MHas ageffcbf +MBlmbcs 512 +MHlmbcs 512 +MMmcs 30"
+
+# Bump ETS table limit to 50000
+if [ "x" = "x$ERL_MAX_ETS_TABLES" ]; then
+ ERL_MAX_ETS_TABLES=50000
+fi
+
+check_start_params() {
+ check_not_empty RABBITMQ_BOOT_MODULE
+ check_not_empty SASL_BOOT_FILE
+}
+
+check_not_empty() {
+ local name="${1:?}"
+ local value
+ eval value=\$$name
+ if [ -z "$value" ]; then
+ echo "Error: ENV variable should be defined: $1.
+ Please check rabbitmq-env, rabbitmq-defaults, and ${RABBITMQ_CONF_ENV_FILE} script files"
+ exit 78
+ fi
+}
+
+start_rabbitmq_server() {
+ set -e
+
+ _rmq_env_set_erl_libs
+
+ RABBITMQ_START_RABBIT=
+ [ "x" = "x$RABBITMQ_ALLOW_INPUT" ] && RABBITMQ_START_RABBIT=" -noinput"
+ if test -z "$RABBITMQ_NODE_ONLY"; then
+ if test "$USE_RABBIT_BOOT_SCRIPT"; then
+ # TODO: This is experimental and undocumented at this point.
+ # It is here just to do simple checks while playing with how
+ # RabbitMQ is started.
+ "$SCRIPTS_DIR/rabbitmq-rel" gen-boot
+ SASL_BOOT_FILE=rabbit
+ test -f "$SASL_BOOT_FILE.boot"
+ RABBITMQ_START_RABBIT="$RABBITMQ_START_RABBIT -init_debug"
+ else
+ RABBITMQ_START_RABBIT="$RABBITMQ_START_RABBIT -s $RABBITMQ_BOOT_MODULE boot"
+ fi
+ fi
+
+ # We need to turn off path expansion because some of the vars,
+ # notably RABBITMQ_SERVER_ERL_ARGS, contain terms that look like
+ # globs and there is no other way of preventing their expansion.
+ set -f
+
+ export ERL_MAX_ETS_TABLES \
+ SYS_PREFIX
+
+ check_start_params
+
+ exec erl \
+ -pa "$RABBITMQ_SERVER_CODE_PATH" \
+ ${RABBITMQ_START_RABBIT} \
+ -boot "${SASL_BOOT_FILE}" \
+ +W w \
+ ${RABBITMQ_DEFAULT_ALLOC_ARGS} \
+ ${RABBITMQ_SERVER_ERL_ARGS} \
+ ${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS} \
+ ${RABBITMQ_SERVER_START_ARGS} \
+ -lager crash_log false \
+ -lager handlers '[]' \
+ "$@"
+}
+
+stop_rabbitmq_server() {
+ if test "$rabbitmq_server_pid"; then
+ kill -TERM "$rabbitmq_server_pid"
+ wait "$rabbitmq_server_pid" || true
+ fi
+}
+
+if [ "$RABBITMQ_ALLOW_INPUT" -o "$RUNNING_UNDER_SYSTEMD" -o "$detached" ]; then
+ # Run erlang VM directly, completely replacing current shell
+ # process - so the pid file written in the code above will be
+ # valid (unless detached, which is also handled in the code
+ # above).
+ #
+ # And also this is the correct mode to run the broker under
+ # systemd - there is no need in a proxy process that converts
+ # signals to graceful shutdown command, the unit file should already
+ # contain instructions for graceful shutdown. Also by removing
+ # this additional process we could simply use value returned by
+ # `os:getpid/0` for a systemd ready notification.
+ start_rabbitmq_server "$@"
+else
+    # When RabbitMQ runs in the foreground but the Erlang shell is
+    # disabled, we set up signal handlers to stop RabbitMQ properly. This
+ # is at least useful in the case of Docker.
+ # The Erlang VM should ignore SIGINT.
+ RABBITMQ_SERVER_START_ARGS="${RABBITMQ_SERVER_START_ARGS} ${RABBITMQ_IGNORE_SIGINT_FLAG}"
+
+ # Signal handlers. They all stop RabbitMQ properly, using
+ # rabbitmqctl stop. This script will exit with different exit codes:
+ # SIGHUP, SIGTSTP + SIGCONT
+ # Ignored until we implement a useful behavior.
+ # SIGTERM
+ # Exits 0 since this is considered a normal process termination.
+ # SIGINT
+ # Exits 128 + $signal_number where $signal_number is 2 for SIGINT (see
+ # https://pubs.opengroup.org/onlinepubs/009695399/utilities/kill.html).
+ # This is considered an abnormal process termination. Normally, we
+ # don't need to specify this exit code because the shell propagates it.
+ # Unfortunately, the signal handler doesn't work as expected in Dash,
+ # thus we need to explicitly restate the exit code.
+ #
+ # The behaviors below should remain consistent with the
+ # equivalent signal handlers in the Erlang code
+ # (see apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl).
+ trap '' HUP TSTP CONT
+ trap "stop_rabbitmq_server; exit 0" TERM
+ trap "stop_rabbitmq_server; exit 130" INT
+
+ start_rabbitmq_server "$@" &
+ export rabbitmq_server_pid=$!
+
+ # Block until RabbitMQ exits or a signal is caught.
+ # Waits for last command (which is start_rabbitmq_server)
+ #
+ # The "|| true" is here to work around an issue with Dash. Normally
+ # in a Bourne shell, if `wait` is interrupted by a signal, the
+ # signal handlers defined above are executed and the script
+ # terminates with the exit code of `wait` (unless the signal handler
+ # overrides that).
+ # In the case of Dash, it looks like `set -e` (set at the beginning
+ # of this script) gets precedence over signal handling. Therefore,
+ # when `wait` is interrupted, its exit code is non-zero and because
+ # of `set -e`, the script terminates immediately without running the
+ # signal handler. To work around this issue, we use "|| true" to
+ # force that statement to succeed and the signal handler to properly
+ # execute. Because the statement below has an exit code of 0, the
+ # signal handler has to restate the expected exit code.
+ wait "$rabbitmq_server_pid" || true
+fi
diff --git a/deps/rabbit/scripts/rabbitmq-server.bat b/deps/rabbit/scripts/rabbitmq-server.bat
new file mode 100644
index 0000000000..3a386b63c4
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-server.bat
@@ -0,0 +1,91 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+set CONF_SCRIPT_DIR=%~dp0
+setlocal enabledelayedexpansion
+setlocal enableextensions
+
+if ERRORLEVEL 1 (
+ echo "Failed to enable command extensions!"
+ exit /B 1
+)
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+set RABBITMQ_DEFAULT_ALLOC_ARGS=+MBas ageffcbf +MHas ageffcbf +MBlmbcs 512 +MHlmbcs 512 +MMmcs 30
+
+set RABBITMQ_START_RABBIT=
+if "!RABBITMQ_ALLOW_INPUT!"=="" (
+ set RABBITMQ_START_RABBIT=!RABBITMQ_START_RABBIT! -noinput
+)
+if "!RABBITMQ_NODE_ONLY!"=="" (
+ set RABBITMQ_START_RABBIT=!RABBITMQ_START_RABBIT! -s "!RABBITMQ_BOOT_MODULE!" boot
+)
+
+set ENV_OK=true
+CALL :check_not_empty "RABBITMQ_BOOT_MODULE" !RABBITMQ_BOOT_MODULE!
+
+if "!ENV_OK!"=="false" (
+ EXIT /b 78
+)
+
+if "!RABBITMQ_ALLOW_INPUT!"=="" (
+ set ERL_CMD=erl.exe
+) else (
+ set ERL_CMD=werl.exe
+)
+
+"!ERLANG_HOME!\bin\!ERL_CMD!" ^
+!RABBITMQ_START_RABBIT! ^
+-boot "!SASL_BOOT_FILE!" ^
++W w ^
+!RABBITMQ_DEFAULT_ALLOC_ARGS! ^
+!RABBITMQ_SERVER_ERL_ARGS! ^
+!RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^
+!RABBITMQ_SERVER_START_ARGS! ^
+-lager crash_log false ^
+-lager handlers "[]" ^
+!STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+:check_not_empty
+if "%~2"=="" (
+ ECHO "Error: ENV variable should be defined: %1. Please check rabbitmq-env and rabbitmq-defaults, and !RABBITMQ_CONF_ENV_FILE! script files. Check also your Environment Variables settings"
+ set ENV_OK=false
+ EXIT /B 78
+ )
+EXIT /B 0
+
+endlocal
+endlocal
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-service.bat b/deps/rabbit/scripts/rabbitmq-service.bat
new file mode 100644
index 0000000000..0b7906d4bf
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-service.bat
@@ -0,0 +1,271 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TN0=%~n0
+set TDP0=%~dp0
+set CONF_SCRIPT_DIR=%~dp0
+set P1=%1
+setlocal enabledelayedexpansion
+setlocal enableextensions
+
+if ERRORLEVEL 1 (
+ echo "Failed to enable command extensions!"
+ exit /B 1
+)
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+REM Check for the short names here too
+if "!RABBITMQ_USE_LONGNAME!"=="true" (
+ set RABBITMQ_NAME_TYPE=-name
+ set NAMETYPE=longnames
+) else (
+ if "!USE_LONGNAME!"=="true" (
+ set RABBITMQ_USE_LONGNAME=true
+ set RABBITMQ_NAME_TYPE=-name
+ set NAMETYPE=longnames
+ ) else (
+ set RABBITMQ_USE_LONGNAME=false
+ set RABBITMQ_NAME_TYPE=-sname
+ set NAMETYPE=shortnames
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
+if "!RABBITMQ_NODENAME!"=="" (
+ if "!NODENAME!"=="" (
+ REM We use Erlang to query the local hostname because
+ REM !COMPUTERNAME! and Erlang may return different results.
+ REM Start erl with -sname to make sure epmd is started.
+ call "%ERLANG_HOME%\bin\erl.exe" -A0 -noinput -boot start_clean -sname rabbit-prelaunch-epmd -eval "init:stop()." >nul 2>&1
+ for /f "delims=" %%F in ('call "%ERLANG_HOME%\bin\erl.exe" -A0 -noinput -boot start_clean -eval "net_kernel:start([list_to_atom(""rabbit-gethostname-"" ++ os:getpid()), %NAMETYPE%]), [_, H] = string:tokens(atom_to_list(node()), ""@""), io:format(""~s~n"", [H]), init:stop()."') do @set HOSTNAME=%%F
+ set RABBITMQ_NODENAME=rabbit@!HOSTNAME!
+ set HOSTNAME=
+ ) else (
+ set RABBITMQ_NODENAME=!NODENAME!
+ )
+)
+set NAMETYPE=
+
+REM Set Erlang distribution port, based on the AMQP TCP port.
+REM
+REM We do this only for the Windows service because in this case, the node has
+REM to start with the distribution enabled on the command line. For all other
+REM cases, distribution is configured at runtime.
+if "!RABBITMQ_NODE_PORT!"=="" (
+ if not "!NODE_PORT!"=="" (
+ set RABBITMQ_NODE_PORT=!NODE_PORT!
+ ) else (
+ set RABBITMQ_NODE_PORT=5672
+ )
+)
+
+if "!RABBITMQ_DIST_PORT!"=="" (
+ if "!DIST_PORT!"=="" (
+ if "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_DIST_PORT=25672
+ ) else (
+ set /a RABBITMQ_DIST_PORT=20000+!RABBITMQ_NODE_PORT!
+ )
+ ) else (
+ set RABBITMQ_DIST_PORT=!DIST_PORT!
+ )
+)
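+
+REM For example, with the default AMQP port 5672 above, the distribution
+REM port becomes 25672 (20000 + 5672).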
+
+set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!
+
+set STARVAR=
+shift
+:loop1
+if "%1"=="" goto after_loop
+ set STARVAR=%STARVAR% %1
+ shift
+goto loop1
+:after_loop
+
+if "!ERLANG_SERVICE_MANAGER_PATH!"=="" (
+ if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B
+ )
+ for /f "delims=" %%i in ('dir /ad/b "!ERLANG_HOME!"') do if exist "!ERLANG_HOME!\%%i\bin\erlsrv.exe" (
+ set ERLANG_SERVICE_MANAGER_PATH=!ERLANG_HOME!\%%i\bin
+ )
+)
+
+set CONSOLE_FLAG=
+set CONSOLE_LOG_VALID=
+for %%i in (new reuse) do if "%%i" == "!RABBITMQ_CONSOLE_LOG!" set CONSOLE_LOG_VALID=TRUE
+if "!CONSOLE_LOG_VALID!" == "TRUE" (
+ set CONSOLE_FLAG=-debugtype !RABBITMQ_CONSOLE_LOG!
+)
+
+rem *** End of configuration ***
+
+if not exist "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" (
+ echo.
+ echo **********************************************
+ echo ERLANG_SERVICE_MANAGER_PATH not set correctly.
+ echo **********************************************
+ echo.
+ echo "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" not found
+ echo Please set ERLANG_SERVICE_MANAGER_PATH to the folder containing "erlsrv.exe".
+ echo.
+ exit /B 1
+)
+
+if "!P1!" == "install" goto INSTALL_SERVICE
+for %%i in (start stop) do if "%%i" == "!P1!" goto START_STOP_SERVICE
+for %%i in (disable enable list remove) do if "%%i" == "!P1!" goto MODIFY_SERVICE
+
+echo.
+echo *********************
+echo Service control usage
+echo *********************
+echo.
+echo !TN0! help - Display this help
+echo !TN0! install - Install the !RABBITMQ_SERVICENAME! service
+echo !TN0! remove - Remove the !RABBITMQ_SERVICENAME! service
+echo.
+echo The following actions can also be accomplished by using
+echo Windows Services Management Console (services.msc):
+echo.
+echo !TN0! start - Start the !RABBITMQ_SERVICENAME! service
+echo !TN0! stop - Stop the !RABBITMQ_SERVICENAME! service
+echo !TN0! disable - Disable the !RABBITMQ_SERVICENAME! service
+echo !TN0! enable - Enable the !RABBITMQ_SERVICENAME! service
+echo.
+exit /B
+
+
+:INSTALL_SERVICE
+
+if not exist "!RABBITMQ_BASE!" (
+ echo Creating base directory !RABBITMQ_BASE! & mkdir "!RABBITMQ_BASE!"
+)
+
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" list !RABBITMQ_SERVICENAME! 2>NUL 1>NUL
+if errorlevel 1 (
+ "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" add !RABBITMQ_SERVICENAME! -internalservicename !RABBITMQ_SERVICENAME!
+) else (
+ echo !RABBITMQ_SERVICENAME! service is already present - only updating service parameters
+)
+
+set RABBITMQ_DEFAULT_ALLOC_ARGS=+MBas ageffcbf +MHas ageffcbf +MBlmbcs 512 +MHlmbcs 512 +MMmcs 30
+
+set RABBITMQ_START_RABBIT=
+if "!RABBITMQ_NODE_ONLY!"=="" (
+ set RABBITMQ_START_RABBIT=-s "!RABBITMQ_BOOT_MODULE!" boot
+)
+
+if "!RABBITMQ_SERVICE_RESTART!"=="" (
+ set RABBITMQ_SERVICE_RESTART=restart
+)
+
+set ENV_OK=true
+CALL :check_not_empty "RABBITMQ_BOOT_MODULE" !RABBITMQ_BOOT_MODULE!
+CALL :check_not_empty "RABBITMQ_NAME_TYPE" !RABBITMQ_NAME_TYPE!
+CALL :check_not_empty "RABBITMQ_NODENAME" !RABBITMQ_NODENAME!
+
+if "!ENV_OK!"=="false" (
+ EXIT /b 78
+)
+
+set ERLANG_SERVICE_ARGUMENTS= ^
+!RABBITMQ_START_RABBIT! ^
+-boot "!SASL_BOOT_FILE!" ^
++W w ^
+!RABBITMQ_DEFAULT_ALLOC_ARGS! ^
+!RABBITMQ_SERVER_ERL_ARGS! ^
+!RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^
+!RABBITMQ_SERVER_START_ARGS! ^
+!RABBITMQ_DIST_ARG! ^
+-lager crash_log false ^
+-lager handlers "[]" ^
+!STARVAR!
+
+set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\!
+set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"!
+
+rem We resolve %APPDATA% at install time so that the user's %APPDATA%
+rem is passed to `rabbit_env` at runtime (instead of the service's
+rem %APPDATA%).
+rem
+rem The goal is to keep the same behavior as when RabbitMQ data
+rem locations were decided in `rabbitmq-env.bat` (sourced by this
+rem script), even though we now compute everything in `rabbit_env` at
+rem runtime.
+rem
+rem We may revisit this in the future so that no data is stored in a
+rem user-specific directory.
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" set !RABBITMQ_SERVICENAME! ^
+-onfail !RABBITMQ_SERVICE_RESTART! ^
+-machine "!ERLANG_SERVICE_MANAGER_PATH!\erl.exe" ^
+-env APPDATA="!APPDATA!" ^
+-env ERL_LIBS="!ERL_LIBS!" ^
+-env ERL_MAX_ETS_TABLES="!ERL_MAX_ETS_TABLES!" ^
+-env ERL_MAX_PORTS="!ERL_MAX_PORTS!" ^
+-workdir "!RABBITMQ_BASE!" ^
+-stopaction "rabbit:stop_and_halt()." ^
+!RABBITMQ_NAME_TYPE! !RABBITMQ_NODENAME! ^
+!CONSOLE_FLAG! ^
+-comment "Multi-protocol open source messaging broker" ^
+-args "!ERLANG_SERVICE_ARGUMENTS!" > NUL
+
+if ERRORLEVEL 1 (
+ EXIT /B 1
+)
+goto END
+
+
+:MODIFY_SERVICE
+
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" !P1! !RABBITMQ_SERVICENAME!
+if ERRORLEVEL 1 (
+ EXIT /B 1
+)
+goto END
+
+
+:START_STOP_SERVICE
+
+REM Starting and stopping via erlsrv reports no error message, so we use net instead
+net !P1! !RABBITMQ_SERVICENAME!
+if ERRORLEVEL 1 (
+ EXIT /B 1
+)
+goto END
+
+:END
+
+EXIT /B 0
+
+:check_not_empty
+if "%~2"=="" (
+ ECHO "Error: ENV variable should be defined: %1. Please check rabbitmq-env, rabbitmq-default, and !RABBITMQ_CONF_ENV_FILE! script files. Check also your Environment Variables settings"
+ set ENV_OK=false
+ EXIT /B 78
+ )
+EXIT /B 0
+
+endlocal
+endlocal
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-streams b/deps/rabbit/scripts/rabbitmq-streams
new file mode 100755
index 0000000000..376cc497df
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-streams
@@ -0,0 +1,32 @@
+#!/bin/sh
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2020 Pivotal Software, Inc. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-streams "$@"
diff --git a/deps/rabbit/scripts/rabbitmq-streams.bat b/deps/rabbit/scripts/rabbitmq-streams.bat
new file mode 100644
index 0000000000..83572a8d62
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-streams.bat
@@ -0,0 +1,63 @@
+@echo off
+REM The contents of this file are subject to the Mozilla Public License
+REM Version 1.1 (the "License"); you may not use this file except in
+REM compliance with the License. You may obtain a copy of the License
+REM at https://www.mozilla.org/MPL/
+REM
+REM Software distributed under the License is distributed on an "AS IS"
+REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+REM the License for the specific language governing rights and
+REM limitations under the License.
+REM
+REM The Original Code is RabbitMQ.
+REM
+REM The Initial Developer of the Original Code is GoPivotal, Inc.
+REM Copyright (c) 2007-2020 Pivotal Software, Inc. All rights reserved.
+REM
+
+REM Scopes the variables to the current batch file
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmq-streams" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-upgrade b/deps/rabbit/scripts/rabbitmq-upgrade
new file mode 100755
index 0000000000..6d2bc3f948
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-upgrade
@@ -0,0 +1,23 @@
+#!/bin/sh
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-upgrade "$@"
diff --git a/deps/rabbit/scripts/rabbitmq-upgrade.bat b/deps/rabbit/scripts/rabbitmq-upgrade.bat
new file mode 100644
index 0000000000..70b0eeee62
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-upgrade.bat
@@ -0,0 +1,55 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+REM Scopes the variables to the current batch file
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^
+-kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmq-upgrade" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmqctl b/deps/rabbit/scripts/rabbitmqctl
new file mode 100755
index 0000000000..8016dbe282
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmqctl
@@ -0,0 +1,23 @@
+#!/bin/sh
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmqctl "$@"
diff --git a/deps/rabbit/scripts/rabbitmqctl.bat b/deps/rabbit/scripts/rabbitmqctl.bat
new file mode 100644
index 0000000000..711ec6e990
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmqctl.bat
@@ -0,0 +1,56 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+REM Scopes the variables to the current batch file
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^
+-kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmqctl" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
+endlocal
diff --git a/deps/rabbit/src/amqqueue.erl b/deps/rabbit/src/amqqueue.erl
new file mode 100644
index 0000000000..3415ebd073
--- /dev/null
+++ b/deps/rabbit/src/amqqueue.erl
@@ -0,0 +1,762 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqqueue). %% Could become amqqueue_v2 in the future.
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([new/8,
+ new/9,
+ new_with_version/9,
+ new_with_version/10,
+ fields/0,
+ fields/1,
+ field_vhost/0,
+ record_version_to_use/0,
+ upgrade/1,
+ upgrade_to/2,
+ % arguments
+ get_arguments/1,
+ set_arguments/2,
+ % decorators
+ get_decorators/1,
+ set_decorators/2,
+ % exclusive_owner
+ get_exclusive_owner/1,
+ % gm_pids
+ get_gm_pids/1,
+ set_gm_pids/2,
+ get_leader/1,
+ % name (#resource)
+ get_name/1,
+ set_name/2,
+ % operator_policy
+ get_operator_policy/1,
+ set_operator_policy/2,
+ get_options/1,
+ % pid
+ get_pid/1,
+ set_pid/2,
+ % policy
+ get_policy/1,
+ set_policy/2,
+ % policy_version
+ get_policy_version/1,
+ set_policy_version/2,
+ % type_state
+ get_type_state/1,
+ set_type_state/2,
+ % recoverable_slaves
+ get_recoverable_slaves/1,
+ set_recoverable_slaves/2,
+ % slave_pids
+ get_slave_pids/1,
+ set_slave_pids/2,
+ % slave_pids_pending_shutdown
+ get_slave_pids_pending_shutdown/1,
+ set_slave_pids_pending_shutdown/2,
+ % state
+ get_state/1,
+ set_state/2,
+ % sync_slave_pids
+ get_sync_slave_pids/1,
+ set_sync_slave_pids/2,
+ get_type/1,
+ get_vhost/1,
+ is_amqqueue/1,
+ is_auto_delete/1,
+ is_durable/1,
+ is_classic/1,
+ is_quorum/1,
+ pattern_match_all/0,
+ pattern_match_on_name/1,
+ pattern_match_on_type/1,
+ reset_mirroring_and_decorators/1,
+ set_immutable/1,
+ qnode/1,
+ macros/0]).
+
+-define(record_version, amqqueue_v2).
+-define(is_backwards_compat_classic(T),
+ (T =:= classic orelse T =:= ?amqqueue_v1_type)).
+
+-record(amqqueue, {
+ name :: rabbit_amqqueue:name() | '_', %% immutable
+ durable :: boolean() | '_', %% immutable
+ auto_delete :: boolean() | '_', %% immutable
+ exclusive_owner = none :: pid() | none | '_', %% immutable
+ arguments = [] :: rabbit_framing:amqp_table() | '_', %% immutable
+ pid :: pid() | ra_server_id() | none | '_', %% durable (just so we
+ %% know home node)
+ slave_pids = [] :: [pid()] | none | '_', %% transient
+ sync_slave_pids = [] :: [pid()] | none| '_',%% transient
+ recoverable_slaves = [] :: [atom()] | none | '_', %% durable
+ policy :: binary() | none | undefined | '_', %% durable, implicit
+ %% update as above
+ operator_policy :: binary() | none | undefined | '_', %% durable,
+ %% implicit
+ %% update
+ %% as above
+ gm_pids = [] :: [{pid(), pid()}] | none | '_', %% transient
+ decorators :: [atom()] | none | undefined | '_', %% transient,
+ %% recalculated
+ %% as above
+ state = live :: atom() | none | '_', %% durable (have we crashed?)
+ policy_version = 0 :: non_neg_integer() | '_',
+ slave_pids_pending_shutdown = [] :: [pid()] | '_',
+ vhost :: rabbit_types:vhost() | undefined | '_', %% secondary index
+ options = #{} :: map() | '_',
+ type = ?amqqueue_v1_type :: module() | '_',
+ type_state = #{} :: map() | '_'
+ }).
+
+-type amqqueue() :: amqqueue_v1:amqqueue_v1() | amqqueue_v2().
+-type amqqueue_v2() :: #amqqueue{
+ name :: rabbit_amqqueue:name(),
+ durable :: boolean(),
+ auto_delete :: boolean(),
+ exclusive_owner :: pid() | none,
+ arguments :: rabbit_framing:amqp_table(),
+ pid :: pid() | ra_server_id() | none,
+ slave_pids :: [pid()] | none,
+ sync_slave_pids :: [pid()] | none,
+ recoverable_slaves :: [atom()] | none,
+ policy :: binary() | none | undefined,
+ operator_policy :: binary() | none | undefined,
+ gm_pids :: [{pid(), pid()}] | none,
+ decorators :: [atom()] | none | undefined,
+ state :: atom() | none,
+ policy_version :: non_neg_integer(),
+ slave_pids_pending_shutdown :: [pid()],
+ vhost :: rabbit_types:vhost() | undefined,
+ options :: map(),
+ type :: atom(),
+ type_state :: #{}
+ }.
+
+-type ra_server_id() :: {Name :: atom(), Node :: node()}.
+
+-type amqqueue_pattern() :: amqqueue_v1:amqqueue_v1_pattern() |
+ amqqueue_v2_pattern().
+-type amqqueue_v2_pattern() :: #amqqueue{
+ name :: rabbit_amqqueue:name() | '_',
+ durable :: '_',
+ auto_delete :: '_',
+ exclusive_owner :: '_',
+ arguments :: '_',
+ pid :: '_',
+ slave_pids :: '_',
+ sync_slave_pids :: '_',
+ recoverable_slaves :: '_',
+ policy :: '_',
+ operator_policy :: '_',
+ gm_pids :: '_',
+ decorators :: '_',
+ state :: '_',
+ policy_version :: '_',
+ slave_pids_pending_shutdown :: '_',
+ vhost :: '_',
+ options :: '_',
+ type :: atom() | '_',
+ type_state :: '_'
+ }.
+
+-export_type([amqqueue/0,
+ amqqueue_v2/0,
+ amqqueue_pattern/0,
+ amqqueue_v2_pattern/0,
+ ra_server_id/0]).
+
+-spec new(rabbit_amqqueue:name(),
+ pid() | ra_server_id() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map()) -> amqqueue().
+
+new(#resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options)
+ when (is_pid(Pid) orelse is_tuple(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) ->
+ new(Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ ?amqqueue_v1_type).
+
+-spec new(rabbit_amqqueue:name(),
+ pid() | ra_server_id() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map(),
+ atom()) -> amqqueue().
+
+new(#resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ when (is_pid(Pid) orelse is_tuple(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) andalso
+ is_atom(Type) ->
+ case record_version_to_use() of
+ ?record_version ->
+ new_with_version(
+ ?record_version,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type);
+ _ ->
+ amqqueue_v1:new(
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ end.
+
+-spec new_with_version
+(amqqueue_v1 | amqqueue_v2,
+ rabbit_amqqueue:name(),
+ pid() | ra_server_id() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map()) -> amqqueue().
+
+new_with_version(RecordVersion,
+ #resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options)
+ when (is_pid(Pid) orelse is_tuple(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) ->
+ new_with_version(RecordVersion,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ ?amqqueue_v1_type).
+
+-spec new_with_version
+(amqqueue_v1 | amqqueue_v2,
+ rabbit_amqqueue:name(),
+ pid() | ra_server_id() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map(),
+ atom()) -> amqqueue().
+
+new_with_version(?record_version,
+ #resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ when (is_pid(Pid) orelse is_tuple(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) andalso
+ is_atom(Type) ->
+ #amqqueue{name = Name,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ arguments = Args,
+ exclusive_owner = Owner,
+ pid = Pid,
+ vhost = VHost,
+ options = Options,
+ type = ensure_type_compat(Type)};
+new_with_version(Version,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ when ?is_backwards_compat_classic(Type) ->
+ amqqueue_v1:new_with_version(
+ Version,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options).
+
+-spec is_amqqueue(any()) -> boolean().
+
+is_amqqueue(#amqqueue{}) -> true;
+is_amqqueue(Queue) -> amqqueue_v1:is_amqqueue(Queue).
+
+-spec record_version_to_use() -> amqqueue_v1 | amqqueue_v2.
+
+record_version_to_use() ->
+ case rabbit_feature_flags:is_enabled(quorum_queue) of
+ true -> ?record_version;
+ false -> amqqueue_v1:record_version_to_use()
+ end.
+
+-spec upgrade(amqqueue()) -> amqqueue().
+
+upgrade(#amqqueue{} = Queue) -> Queue;
+upgrade(OldQueue) -> upgrade_to(record_version_to_use(), OldQueue).
+
+-spec upgrade_to
+(amqqueue_v2, amqqueue()) -> amqqueue_v2();
+(amqqueue_v1, amqqueue_v1:amqqueue_v1()) -> amqqueue_v1:amqqueue_v1().
+
+upgrade_to(?record_version, #amqqueue{} = Queue) ->
+ Queue;
+upgrade_to(?record_version, OldQueue) ->
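+    %% A v1 record is converted to v2 by appending the two fields that were
+    %% added in v2: 'type' (defaulting to the classic queue type) and
+    %% 'type_state' (undefined).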
+ Fields = erlang:tuple_to_list(OldQueue) ++ [?amqqueue_v1_type,
+ undefined],
+ #amqqueue{} = erlang:list_to_tuple(Fields);
+upgrade_to(Version, OldQueue) ->
+ amqqueue_v1:upgrade_to(Version, OldQueue).
+
+% arguments
+
+-spec get_arguments(amqqueue()) -> rabbit_framing:amqp_table().
+
+get_arguments(#amqqueue{arguments = Args}) ->
+ Args;
+get_arguments(Queue) ->
+ amqqueue_v1:get_arguments(Queue).
+
+-spec set_arguments(amqqueue(), rabbit_framing:amqp_table()) -> amqqueue().
+
+set_arguments(#amqqueue{} = Queue, Args) ->
+ Queue#amqqueue{arguments = Args};
+set_arguments(Queue, Args) ->
+ amqqueue_v1:set_arguments(Queue, Args).
+
+% decorators
+
+-spec get_decorators(amqqueue()) -> [atom()] | none | undefined.
+
+get_decorators(#amqqueue{decorators = Decorators}) ->
+ Decorators;
+get_decorators(Queue) ->
+ amqqueue_v1:get_decorators(Queue).
+
+-spec set_decorators(amqqueue(), [atom()] | none | undefined) -> amqqueue().
+
+set_decorators(#amqqueue{} = Queue, Decorators) ->
+ Queue#amqqueue{decorators = Decorators};
+set_decorators(Queue, Decorators) ->
+ amqqueue_v1:set_decorators(Queue, Decorators).
+
+-spec get_exclusive_owner(amqqueue()) -> pid() | none.
+
+get_exclusive_owner(#amqqueue{exclusive_owner = Owner}) ->
+ Owner;
+get_exclusive_owner(Queue) ->
+ amqqueue_v1:get_exclusive_owner(Queue).
+
+% gm_pids
+
+-spec get_gm_pids(amqqueue()) -> [{pid(), pid()}] | none.
+
+get_gm_pids(#amqqueue{gm_pids = GMPids}) ->
+ GMPids;
+get_gm_pids(Queue) ->
+ amqqueue_v1:get_gm_pids(Queue).
+
+-spec set_gm_pids(amqqueue(), [{pid(), pid()}] | none) -> amqqueue().
+
+set_gm_pids(#amqqueue{} = Queue, GMPids) ->
+ Queue#amqqueue{gm_pids = GMPids};
+set_gm_pids(Queue, GMPids) ->
+ amqqueue_v1:set_gm_pids(Queue, GMPids).
+
+-spec get_leader(amqqueue_v2()) -> node().
+
+get_leader(#amqqueue{type = rabbit_quorum_queue, pid = {_, Leader}}) -> Leader.
+
+% operator_policy
+
+-spec get_operator_policy(amqqueue()) -> binary() | none | undefined.
+
+get_operator_policy(#amqqueue{operator_policy = OpPolicy}) -> OpPolicy;
+get_operator_policy(Queue) -> amqqueue_v1:get_operator_policy(Queue).
+
+-spec set_operator_policy(amqqueue(), binary() | none | undefined) ->
+ amqqueue().
+
+set_operator_policy(#amqqueue{} = Queue, Policy) ->
+ Queue#amqqueue{operator_policy = Policy};
+set_operator_policy(Queue, Policy) ->
+ amqqueue_v1:set_operator_policy(Queue, Policy).
+
+% name
+
+-spec get_name(amqqueue()) -> rabbit_amqqueue:name().
+
+get_name(#amqqueue{name = Name}) -> Name;
+get_name(Queue) -> amqqueue_v1:get_name(Queue).
+
+-spec set_name(amqqueue(), rabbit_amqqueue:name()) -> amqqueue().
+
+set_name(#amqqueue{} = Queue, Name) ->
+ Queue#amqqueue{name = Name};
+set_name(Queue, Name) ->
+ amqqueue_v1:set_name(Queue, Name).
+
+-spec get_options(amqqueue()) -> map().
+
+get_options(#amqqueue{options = Options}) -> Options;
+get_options(Queue) -> amqqueue_v1:get_options(Queue).
+
+% pid
+
+-spec get_pid
+(amqqueue_v2()) -> pid() | ra_server_id() | none;
+(amqqueue_v1:amqqueue_v1()) -> pid() | none.
+
+get_pid(#amqqueue{pid = Pid}) -> Pid;
+get_pid(Queue) -> amqqueue_v1:get_pid(Queue).
+
+-spec set_pid
+(amqqueue_v2(), pid() | ra_server_id() | none) -> amqqueue_v2();
+(amqqueue_v1:amqqueue_v1(), pid() | none) -> amqqueue_v1:amqqueue_v1().
+
+set_pid(#amqqueue{} = Queue, Pid) ->
+ Queue#amqqueue{pid = Pid};
+set_pid(Queue, Pid) ->
+ amqqueue_v1:set_pid(Queue, Pid).
+
+% policy
+
+-spec get_policy(amqqueue()) -> proplists:proplist() | none | undefined.
+
+get_policy(#amqqueue{policy = Policy}) -> Policy;
+get_policy(Queue) -> amqqueue_v1:get_policy(Queue).
+
+-spec set_policy(amqqueue(), binary() | none | undefined) -> amqqueue().
+
+set_policy(#amqqueue{} = Queue, Policy) ->
+ Queue#amqqueue{policy = Policy};
+set_policy(Queue, Policy) ->
+ amqqueue_v1:set_policy(Queue, Policy).
+
+% policy_version
+
+-spec get_policy_version(amqqueue()) -> non_neg_integer().
+
+get_policy_version(#amqqueue{policy_version = PV}) ->
+ PV;
+get_policy_version(Queue) ->
+ amqqueue_v1:get_policy_version(Queue).
+
+-spec set_policy_version(amqqueue(), non_neg_integer()) -> amqqueue().
+
+set_policy_version(#amqqueue{} = Queue, PV) ->
+ Queue#amqqueue{policy_version = PV};
+set_policy_version(Queue, PV) ->
+ amqqueue_v1:set_policy_version(Queue, PV).
+
+% recoverable_slaves
+
+-spec get_recoverable_slaves(amqqueue()) -> [atom()] | none.
+
+get_recoverable_slaves(#amqqueue{recoverable_slaves = Slaves}) ->
+ Slaves;
+get_recoverable_slaves(Queue) ->
+ amqqueue_v1:get_recoverable_slaves(Queue).
+
+-spec set_recoverable_slaves(amqqueue(), [atom()] | none) -> amqqueue().
+
+set_recoverable_slaves(#amqqueue{} = Queue, Slaves) ->
+ Queue#amqqueue{recoverable_slaves = Slaves};
+set_recoverable_slaves(Queue, Slaves) ->
+ amqqueue_v1:set_recoverable_slaves(Queue, Slaves).
+
+% type_state (new in v2)
+
+-spec get_type_state(amqqueue()) -> map().
+get_type_state(#amqqueue{type_state = TState}) ->
+ TState;
+get_type_state(_) ->
+ #{}.
+
+-spec set_type_state(amqqueue(), map()) -> amqqueue().
+set_type_state(#amqqueue{} = Queue, TState) ->
+ Queue#amqqueue{type_state = TState};
+set_type_state(Queue, _TState) ->
+ Queue.
+
+% slave_pids
+
+-spec get_slave_pids(amqqueue()) -> [pid()] | none.
+
+get_slave_pids(#amqqueue{slave_pids = Slaves}) ->
+ Slaves;
+get_slave_pids(Queue) ->
+ amqqueue_v1:get_slave_pids(Queue).
+
+-spec set_slave_pids(amqqueue(), [pid()] | none) -> amqqueue().
+
+set_slave_pids(#amqqueue{} = Queue, SlavePids) ->
+ Queue#amqqueue{slave_pids = SlavePids};
+set_slave_pids(Queue, SlavePids) ->
+ amqqueue_v1:set_slave_pids(Queue, SlavePids).
+
+% slave_pids_pending_shutdown
+
+-spec get_slave_pids_pending_shutdown(amqqueue()) -> [pid()].
+
+get_slave_pids_pending_shutdown(
+ #amqqueue{slave_pids_pending_shutdown = Slaves}) ->
+ Slaves;
+get_slave_pids_pending_shutdown(Queue) ->
+ amqqueue_v1:get_slave_pids_pending_shutdown(Queue).
+
+-spec set_slave_pids_pending_shutdown(amqqueue(), [pid()]) -> amqqueue().
+
+set_slave_pids_pending_shutdown(#amqqueue{} = Queue, SlavePids) ->
+ Queue#amqqueue{slave_pids_pending_shutdown = SlavePids};
+set_slave_pids_pending_shutdown(Queue, SlavePids) ->
+ amqqueue_v1:set_slave_pids_pending_shutdown(Queue, SlavePids).
+
+% state
+
+-spec get_state(amqqueue()) -> atom() | none.
+
+get_state(#amqqueue{state = State}) -> State;
+get_state(Queue) -> amqqueue_v1:get_state(Queue).
+
+-spec set_state(amqqueue(), atom() | none) -> amqqueue().
+
+set_state(#amqqueue{} = Queue, State) ->
+ Queue#amqqueue{state = State};
+set_state(Queue, State) ->
+ amqqueue_v1:set_state(Queue, State).
+
+% sync_slave_pids
+
+-spec get_sync_slave_pids(amqqueue()) -> [pid()] | none.
+
+get_sync_slave_pids(#amqqueue{sync_slave_pids = Pids}) ->
+ Pids;
+get_sync_slave_pids(Queue) ->
+ amqqueue_v1:get_sync_slave_pids(Queue).
+
+-spec set_sync_slave_pids(amqqueue(), [pid()] | none) -> amqqueue().
+
+set_sync_slave_pids(#amqqueue{} = Queue, Pids) ->
+ Queue#amqqueue{sync_slave_pids = Pids};
+set_sync_slave_pids(Queue, Pids) ->
+ amqqueue_v1:set_sync_slave_pids(Queue, Pids).
+
+%% New in v2.
+
+-spec get_type(amqqueue()) -> atom().
+
+get_type(#amqqueue{type = Type}) -> Type;
+get_type(Queue) when ?is_amqqueue(Queue) -> ?amqqueue_v1_type.
+
+-spec get_vhost(amqqueue()) -> rabbit_types:vhost() | undefined.
+
+get_vhost(#amqqueue{vhost = VHost}) -> VHost;
+get_vhost(Queue) -> amqqueue_v1:get_vhost(Queue).
+
+-spec is_auto_delete(amqqueue()) -> boolean().
+
+is_auto_delete(#amqqueue{auto_delete = AutoDelete}) ->
+ AutoDelete;
+is_auto_delete(Queue) ->
+ amqqueue_v1:is_auto_delete(Queue).
+
+-spec is_durable(amqqueue()) -> boolean().
+
+is_durable(#amqqueue{durable = Durable}) -> Durable;
+is_durable(Queue) -> amqqueue_v1:is_durable(Queue).
+
+-spec is_classic(amqqueue()) -> boolean().
+
+is_classic(Queue) ->
+ get_type(Queue) =:= ?amqqueue_v1_type.
+
+-spec is_quorum(amqqueue()) -> boolean().
+
+is_quorum(Queue) ->
+ get_type(Queue) =:= rabbit_quorum_queue.
+
+fields() ->
+ case record_version_to_use() of
+ ?record_version -> fields(?record_version);
+ _ -> amqqueue_v1:fields()
+ end.
+
+fields(?record_version) -> record_info(fields, amqqueue);
+fields(Version) -> amqqueue_v1:fields(Version).
+
+field_vhost() ->
+ case record_version_to_use() of
+ ?record_version -> #amqqueue.vhost;
+ _ -> amqqueue_v1:field_vhost()
+ end.
+
+-spec pattern_match_all() -> amqqueue_pattern().
+
+pattern_match_all() ->
+ case record_version_to_use() of
+ ?record_version -> #amqqueue{_ = '_'};
+ _ -> amqqueue_v1:pattern_match_all()
+ end.
+
+-spec pattern_match_on_name(rabbit_amqqueue:name()) -> amqqueue_pattern().
+
+pattern_match_on_name(Name) ->
+ case record_version_to_use() of
+ ?record_version -> #amqqueue{name = Name, _ = '_'};
+ _ -> amqqueue_v1:pattern_match_on_name(Name)
+ end.
+
+-spec pattern_match_on_type(atom()) -> amqqueue_pattern().
+
+pattern_match_on_type(Type) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #amqqueue{type = Type, _ = '_'};
+ _ when ?is_backwards_compat_classic(Type) ->
+ amqqueue_v1:pattern_match_all();
+ %% FIXME: We try a pattern which should never match when the
+ %% `quorum_queue` feature flag is not enabled yet. Is there
+ %% a better solution?
+ _ ->
+ amqqueue_v1:pattern_match_on_name(
+ rabbit_misc:r(<<0>>, queue, <<0>>))
+ end.
+
+-spec reset_mirroring_and_decorators(amqqueue()) -> amqqueue().
+
+reset_mirroring_and_decorators(#amqqueue{} = Queue) ->
+ Queue#amqqueue{slave_pids = [],
+ sync_slave_pids = [],
+ gm_pids = [],
+ decorators = undefined};
+reset_mirroring_and_decorators(Queue) ->
+ amqqueue_v1:reset_mirroring_and_decorators(Queue).
+
+-spec set_immutable(amqqueue()) -> amqqueue().
+
+set_immutable(#amqqueue{} = Queue) ->
+ Queue#amqqueue{pid = none,
+ slave_pids = [],
+ sync_slave_pids = none,
+ recoverable_slaves = none,
+ gm_pids = none,
+ policy = none,
+ decorators = none,
+ state = none};
+set_immutable(Queue) ->
+ amqqueue_v1:set_immutable(Queue).
+
+-spec qnode(amqqueue() | pid() | ra_server_id()) -> node().
+
+qnode(Queue) when ?is_amqqueue(Queue) ->
+ QPid = get_pid(Queue),
+ qnode(QPid);
+qnode(QPid) when is_pid(QPid) ->
+ node(QPid);
+qnode({_, Node}) ->
+ Node.
+
+% private
+
+macros() ->
+ io:format(
+ "-define(is_~s(Q), is_record(Q, amqqueue, ~b)).~n~n",
+ [?record_version, record_info(size, amqqueue)]),
+ %% The field number starts at 2 because the first element is the
+ %% record name.
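+    %% For example, the first generated macro is
+    %% -define(amqqueue_v2_field_name(Q), element(2, Q)).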
+ macros(record_info(fields, amqqueue), 2).
+
+macros([Field | Rest], I) ->
+ io:format(
+ "-define(~s_field_~s(Q), element(~b, Q)).~n",
+ [?record_version, Field, I]),
+ macros(Rest, I + 1);
+macros([], _) ->
+ ok.
+
+ensure_type_compat(classic) ->
+ ?amqqueue_v1_type;
+ensure_type_compat(Type) ->
+ Type.
diff --git a/deps/rabbit/src/amqqueue_v1.erl b/deps/rabbit/src/amqqueue_v1.erl
new file mode 100644
index 0000000000..dd1de74a4e
--- /dev/null
+++ b/deps/rabbit/src/amqqueue_v1.erl
@@ -0,0 +1,584 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqqueue_v1).
+
+-include_lib("rabbit_common/include/resource.hrl").
+-include("amqqueue.hrl").
+
+-export([new/8,
+ new/9,
+ new_with_version/9,
+ new_with_version/10,
+ fields/0,
+ fields/1,
+ field_vhost/0,
+ record_version_to_use/0,
+ upgrade/1,
+ upgrade_to/2,
+ % arguments
+ get_arguments/1,
+ set_arguments/2,
+ % decorators
+ get_decorators/1,
+ set_decorators/2,
+ % exclusive_owner
+ get_exclusive_owner/1,
+ % gm_pids
+ get_gm_pids/1,
+ set_gm_pids/2,
+ get_leader/1,
+ % name (#resource)
+ get_name/1,
+ set_name/2,
+ % operator_policy
+ get_operator_policy/1,
+ set_operator_policy/2,
+ get_options/1,
+ % pid
+ get_pid/1,
+ set_pid/2,
+ % policy
+ get_policy/1,
+ set_policy/2,
+ % policy_version
+ get_policy_version/1,
+ set_policy_version/2,
+ % type_state
+ get_type_state/1,
+ set_type_state/2,
+ % recoverable_slaves
+ get_recoverable_slaves/1,
+ set_recoverable_slaves/2,
+ % slave_pids
+ get_slave_pids/1,
+ set_slave_pids/2,
+ % slave_pids_pending_shutdown
+ get_slave_pids_pending_shutdown/1,
+ set_slave_pids_pending_shutdown/2,
+ % state
+ get_state/1,
+ set_state/2,
+ % sync_slave_pids
+ get_sync_slave_pids/1,
+ set_sync_slave_pids/2,
+ get_type/1,
+ get_vhost/1,
+ is_amqqueue/1,
+ is_auto_delete/1,
+ is_durable/1,
+ is_classic/1,
+ is_quorum/1,
+ pattern_match_all/0,
+ pattern_match_on_name/1,
+ pattern_match_on_type/1,
+ reset_mirroring_and_decorators/1,
+ set_immutable/1,
+ qnode/1,
+ macros/0]).
+
+-define(record_version, ?MODULE).
+-define(is_backwards_compat_classic(T),
+ (T =:= classic orelse T =:= ?amqqueue_v1_type)).
+
+-record(amqqueue, {
+ name :: rabbit_amqqueue:name() | '_', %% immutable
+ durable :: boolean() | '_', %% immutable
+ auto_delete :: boolean() | '_', %% immutable
+ exclusive_owner = none :: pid() | none | '_', %% immutable
+ arguments = [] :: rabbit_framing:amqp_table() | '_', %% immutable
+ pid :: pid() | none | '_', %% durable (just so we
+ %% know home node)
+ slave_pids = [] :: [pid()] | none | '_', %% transient
+ sync_slave_pids = [] :: [pid()] | none| '_',%% transient
+ recoverable_slaves = [] :: [atom()] | none | '_', %% durable
+ policy :: binary() | none | undefined | '_', %% durable, implicit
+ %% update as above
+ operator_policy :: binary() | none | undefined | '_', %% durable,
+ %% implicit
+ %% update
+ %% as above
+ gm_pids = [] :: [{pid(), pid()}] | none | '_', %% transient
+ decorators :: [atom()] | none | undefined | '_', %% transient,
+ %% recalculated
+ %% as above
+ state = live :: atom() | none | '_', %% durable (have we crashed?)
+ policy_version = 0 :: non_neg_integer() | '_',
+ slave_pids_pending_shutdown = [] :: [pid()] | '_',
+ vhost :: rabbit_types:vhost() | undefined | '_', %% secondary index
+ options = #{} :: map() | '_'
+ }).
+
+-type amqqueue() :: amqqueue_v1().
+-type amqqueue_v1() :: #amqqueue{
+ name :: rabbit_amqqueue:name(),
+ durable :: boolean(),
+ auto_delete :: boolean(),
+ exclusive_owner :: pid() | none,
+ arguments :: rabbit_framing:amqp_table(),
+ pid :: pid() | none,
+ slave_pids :: [pid()] | none,
+ sync_slave_pids :: [pid()] | none,
+ recoverable_slaves :: [atom()] | none,
+ policy :: binary() | none | undefined,
+ operator_policy :: binary() | none | undefined,
+ gm_pids :: [{pid(), pid()}] | none,
+ decorators :: [atom()] | none | undefined,
+ state :: atom() | none,
+ policy_version :: non_neg_integer(),
+ slave_pids_pending_shutdown :: [pid()],
+ vhost :: rabbit_types:vhost() | undefined,
+ options :: map()
+ }.
+
+-type amqqueue_pattern() :: amqqueue_v1_pattern().
+-type amqqueue_v1_pattern() :: #amqqueue{
+ name :: rabbit_amqqueue:name() | '_',
+ durable :: '_',
+ auto_delete :: '_',
+ exclusive_owner :: '_',
+ arguments :: '_',
+ pid :: '_',
+ slave_pids :: '_',
+ sync_slave_pids :: '_',
+ recoverable_slaves :: '_',
+ policy :: '_',
+ operator_policy :: '_',
+ gm_pids :: '_',
+ decorators :: '_',
+ state :: '_',
+ policy_version :: '_',
+ slave_pids_pending_shutdown :: '_',
+ vhost :: '_',
+ options :: '_'
+ }.
+
+-export_type([amqqueue/0,
+ amqqueue_v1/0,
+ amqqueue_pattern/0,
+ amqqueue_v1_pattern/0]).
+
+-spec new(rabbit_amqqueue:name(),
+ pid() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map()) -> amqqueue().
+
+new(#resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options)
+ when (is_pid(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) ->
+ new_with_version(
+ ?record_version,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options).
+
+-spec new(rabbit_amqqueue:name(),
+ pid() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map(),
+ ?amqqueue_v1_type | classic) -> amqqueue().
+
+new(#resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ when (is_pid(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) andalso
+ ?is_backwards_compat_classic(Type) ->
+ new(
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options).
+
+-spec new_with_version(amqqueue_v1,
+ rabbit_amqqueue:name(),
+ pid() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map()) -> amqqueue().
+
+new_with_version(?record_version,
+ #resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options)
+ when (is_pid(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) ->
+ #amqqueue{name = Name,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ arguments = Args,
+ exclusive_owner = Owner,
+ pid = Pid,
+ vhost = VHost,
+ options = Options}.
+
+-spec new_with_version(amqqueue_v1,
+ rabbit_amqqueue:name(),
+ pid() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map(),
+ ?amqqueue_v1_type | classic) -> amqqueue().
+
+new_with_version(?record_version,
+ #resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ when (is_pid(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) andalso
+ ?is_backwards_compat_classic(Type) ->
+ new_with_version(
+ ?record_version,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options).
+
+-spec is_amqqueue(any()) -> boolean().
+
+is_amqqueue(#amqqueue{}) -> true;
+is_amqqueue(_) -> false.
+
+-spec record_version_to_use() -> amqqueue_v1.
+
+record_version_to_use() ->
+ ?record_version.
+
+-spec upgrade(amqqueue()) -> amqqueue().
+
+upgrade(#amqqueue{} = Queue) -> Queue.
+
+-spec upgrade_to(amqqueue_v1, amqqueue()) -> amqqueue().
+
+upgrade_to(?record_version, #amqqueue{} = Queue) ->
+ Queue.
+
+% arguments
+
+-spec get_arguments(amqqueue()) -> rabbit_framing:amqp_table().
+
+get_arguments(#amqqueue{arguments = Args}) -> Args.
+
+-spec set_arguments(amqqueue(), rabbit_framing:amqp_table()) -> amqqueue().
+
+set_arguments(#amqqueue{} = Queue, Args) ->
+ Queue#amqqueue{arguments = Args}.
+
+% decorators
+
+-spec get_decorators(amqqueue()) -> [atom()] | none | undefined.
+
+get_decorators(#amqqueue{decorators = Decorators}) -> Decorators.
+
+-spec set_decorators(amqqueue(), [atom()] | none | undefined) -> amqqueue().
+
+set_decorators(#amqqueue{} = Queue, Decorators) ->
+ Queue#amqqueue{decorators = Decorators}.
+
+-spec get_exclusive_owner(amqqueue()) -> pid() | none.
+
+get_exclusive_owner(#amqqueue{exclusive_owner = Owner}) -> Owner.
+
+% gm_pids
+
+-spec get_gm_pids(amqqueue()) -> [{pid(), pid()}] | none.
+
+get_gm_pids(#amqqueue{gm_pids = GMPids}) -> GMPids.
+
+-spec set_gm_pids(amqqueue(), [{pid(), pid()}] | none) -> amqqueue().
+
+set_gm_pids(#amqqueue{} = Queue, GMPids) ->
+ Queue#amqqueue{gm_pids = GMPids}.
+
+-spec get_leader(amqqueue_v1()) -> no_return().
+
+get_leader(_) -> throw({unsupported, ?record_version, get_leader}).
+
+% operator_policy
+
+-spec get_operator_policy(amqqueue()) -> binary() | none | undefined.
+
+get_operator_policy(#amqqueue{operator_policy = OpPolicy}) -> OpPolicy.
+
+-spec set_operator_policy(amqqueue(), binary() | none | undefined) ->
+ amqqueue().
+
+set_operator_policy(#amqqueue{} = Queue, OpPolicy) ->
+ Queue#amqqueue{operator_policy = OpPolicy}.
+
+% name
+
+-spec get_name(amqqueue()) -> rabbit_amqqueue:name().
+
+get_name(#amqqueue{name = Name}) -> Name.
+
+-spec set_name(amqqueue(), rabbit_amqqueue:name()) -> amqqueue().
+
+set_name(#amqqueue{} = Queue, Name) ->
+ Queue#amqqueue{name = Name}.
+
+-spec get_options(amqqueue()) -> map().
+
+get_options(#amqqueue{options = Options}) -> Options.
+
+% pid
+
+-spec get_pid
+(amqqueue_v1:amqqueue_v1()) -> pid() | none.
+
+get_pid(#amqqueue{pid = Pid}) -> Pid.
+
+-spec set_pid
+(amqqueue_v1:amqqueue_v1(), pid() | none) -> amqqueue_v1:amqqueue_v1().
+
+set_pid(#amqqueue{} = Queue, Pid) ->
+ Queue#amqqueue{pid = Pid}.
+
+% policy
+
+-spec get_policy(amqqueue()) -> proplists:proplist() | none | undefined.
+
+get_policy(#amqqueue{policy = Policy}) -> Policy.
+
+-spec set_policy(amqqueue(), binary() | none | undefined) -> amqqueue().
+
+set_policy(#amqqueue{} = Queue, Policy) ->
+ Queue#amqqueue{policy = Policy}.
+
+% policy_version
+
+-spec get_policy_version(amqqueue()) -> non_neg_integer().
+
+get_policy_version(#amqqueue{policy_version = PV}) ->
+ PV.
+
+-spec set_policy_version(amqqueue(), non_neg_integer()) -> amqqueue().
+
+set_policy_version(#amqqueue{} = Queue, PV) ->
+ Queue#amqqueue{policy_version = PV}.
+
+% recoverable_slaves
+
+-spec get_recoverable_slaves(amqqueue()) -> [atom()] | none.
+
+get_recoverable_slaves(#amqqueue{recoverable_slaves = Slaves}) ->
+ Slaves.
+
+-spec set_recoverable_slaves(amqqueue(), [atom()] | none) -> amqqueue().
+
+set_recoverable_slaves(#amqqueue{} = Queue, Slaves) ->
+ Queue#amqqueue{recoverable_slaves = Slaves}.
+
+% type_state (new in v2)
+
+-spec get_type_state(amqqueue()) -> no_return().
+
+get_type_state(_) -> throw({unsupported, ?record_version, get_type_state}).
+
+-spec set_type_state(amqqueue(), map()) -> no_return().
+
+set_type_state(_, _) ->
+ throw({unsupported, ?record_version, set_type_state}).
+
+% slave_pids
+
+get_slave_pids(#amqqueue{slave_pids = Slaves}) ->
+ Slaves.
+
+set_slave_pids(#amqqueue{} = Queue, SlavePids) ->
+ Queue#amqqueue{slave_pids = SlavePids}.
+
+% slave_pids_pending_shutdown
+
+get_slave_pids_pending_shutdown(
+ #amqqueue{slave_pids_pending_shutdown = Slaves}) ->
+ Slaves.
+
+set_slave_pids_pending_shutdown(#amqqueue{} = Queue, SlavePids) ->
+ Queue#amqqueue{slave_pids_pending_shutdown = SlavePids}.
+
+% state
+
+-spec get_state(amqqueue()) -> atom() | none.
+
+get_state(#amqqueue{state = State}) -> State.
+
+-spec set_state(amqqueue(), atom() | none) -> amqqueue().
+
+set_state(#amqqueue{} = Queue, State) ->
+ Queue#amqqueue{state = State}.
+
+% sync_slave_pids
+
+-spec get_sync_slave_pids(amqqueue()) -> [pid()] | none.
+
+get_sync_slave_pids(#amqqueue{sync_slave_pids = Pids}) ->
+ Pids.
+
+-spec set_sync_slave_pids(amqqueue(), [pid()] | none) -> amqqueue().
+
+set_sync_slave_pids(#amqqueue{} = Queue, Pids) ->
+ Queue#amqqueue{sync_slave_pids = Pids}.
+
+%% New in v2.
+
+-spec get_type(amqqueue()) -> atom().
+
+get_type(Queue) when ?is_amqqueue(Queue) -> ?amqqueue_v1_type.
+
+-spec get_vhost(amqqueue()) -> rabbit_types:vhost() | undefined.
+
+get_vhost(#amqqueue{vhost = VHost}) -> VHost.
+
+-spec is_auto_delete(amqqueue()) -> boolean().
+
+is_auto_delete(#amqqueue{auto_delete = AutoDelete}) -> AutoDelete.
+
+-spec is_durable(amqqueue()) -> boolean().
+
+is_durable(#amqqueue{durable = Durable}) -> Durable.
+
+-spec is_classic(amqqueue()) -> boolean().
+
+is_classic(Queue) ->
+ get_type(Queue) =:= ?amqqueue_v1_type.
+
+-spec is_quorum(amqqueue()) -> boolean().
+
+is_quorum(Queue) when ?is_amqqueue(Queue) ->
+ false.
+
+fields() -> fields(?record_version).
+
+fields(?record_version) -> record_info(fields, amqqueue).
+
+field_vhost() -> #amqqueue.vhost.
+
+-spec pattern_match_all() -> amqqueue_pattern().
+
+pattern_match_all() -> #amqqueue{_ = '_'}.
+
+-spec pattern_match_on_name(rabbit_amqqueue:name()) ->
+ amqqueue_pattern().
+
+pattern_match_on_name(Name) -> #amqqueue{name = Name, _ = '_'}.
+
+-spec pattern_match_on_type(atom()) -> no_return().
+
+pattern_match_on_type(_) ->
+ throw({unsupported, ?record_version, pattern_match_on_type}).
+
+reset_mirroring_and_decorators(#amqqueue{} = Queue) ->
+ Queue#amqqueue{slave_pids = [],
+ sync_slave_pids = [],
+ gm_pids = [],
+ decorators = undefined}.
+
+set_immutable(#amqqueue{} = Queue) ->
+ Queue#amqqueue{pid = none,
+ slave_pids = none,
+ sync_slave_pids = none,
+ recoverable_slaves = none,
+ gm_pids = none,
+ policy = none,
+ decorators = none,
+ state = none}.
+
+-spec qnode(amqqueue() | pid()) -> node().
+
+qnode(Queue) when ?is_amqqueue(Queue) ->
+ QPid = get_pid(Queue),
+ qnode(QPid);
+qnode(QPid) when is_pid(QPid) ->
+ node(QPid).
+
+macros() ->
+ io:format(
+ "-define(is_~s(Q), is_record(Q, amqqueue, ~b)).~n~n",
+ [?record_version, record_info(size, amqqueue)]),
+ %% The field number starts at 2 because the first element is the
+ %% record name.
+ macros(record_info(fields, amqqueue), 2).
+
+macros([Field | Rest], I) ->
+ io:format(
+ "-define(~s_field_~s(Q), element(~b, Q)).~n",
+ [?record_version, Field, I]),
+ macros(Rest, I + 1);
+macros([], _) ->
+ ok.
diff --git a/deps/rabbit/src/background_gc.erl b/deps/rabbit/src/background_gc.erl
new file mode 100644
index 0000000000..be5bf0c995
--- /dev/null
+++ b/deps/rabbit/src/background_gc.erl
@@ -0,0 +1,78 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(background_gc).
+
+-behaviour(gen_server2).
+
+-export([start_link/0, run/0]).
+-export([gc/0]). %% For run_interval only
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(MAX_RATIO, 0.01).
+-define(MAX_INTERVAL, 240000).
+
+-record(state, {last_interval}).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> {'ok', pid()} | {'error', any()}.
+
+start_link() -> gen_server2:start_link({local, ?MODULE}, ?MODULE, [],
+ [{timeout, infinity}]).
+
+-spec run() -> 'ok'.
+
+run() -> gen_server2:cast(?MODULE, run).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, IdealInterval} = application:get_env(rabbit, background_gc_target_interval),
+ {ok, interval_gc(#state{last_interval = IdealInterval})}.
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, {unexpected_call, Msg}, State}.
+
+handle_cast(run, State) -> gc(), {noreply, State};
+
+handle_cast(Msg, State) -> {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(run, State) -> {noreply, interval_gc(State)};
+
+handle_info(Msg, State) -> {stop, {unexpected_info, Msg}, State}.
+
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+terminate(_Reason, State) -> State.
+
+%%----------------------------------------------------------------------------
+
+interval_gc(State = #state{last_interval = LastInterval}) ->
+ {ok, IdealInterval} = application:get_env(rabbit, background_gc_target_interval),
+ {ok, Interval} = rabbit_misc:interval_operation(
+ {?MODULE, gc, []},
+ ?MAX_RATIO, ?MAX_INTERVAL, IdealInterval, LastInterval),
+ erlang:send_after(Interval, self(), run),
+ State#state{last_interval = Interval}.
+
+-spec gc() -> 'ok'.
+
+gc() ->
+ Enabled = rabbit_misc:get_env(rabbit, background_gc_enabled, false),
+ case Enabled of
+ true ->
+ [garbage_collect(P) || P <- processes(),
+ {status, waiting} == process_info(P, status)],
+            %% Also GC this process explicitly: it is running (not waiting),
+            %% so the pass above skipped it.
+ garbage_collect();
+ false ->
+ ok
+ end,
+ ok.
diff --git a/deps/rabbit/src/code_server_cache.erl b/deps/rabbit/src/code_server_cache.erl
new file mode 100644
index 0000000000..b53f5dcee9
--- /dev/null
+++ b/deps/rabbit/src/code_server_cache.erl
@@ -0,0 +1,81 @@
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+%% ex: ts=4 sw=4 et
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(code_server_cache).
+
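+%% A small gen_server that wraps calls to modules which may not be loadable.
+%% The first failing call (for example one raising 'undef') marks the module
+%% as unavailable in the cache; subsequent calls for that module return the
+%% supplied default without attempting the call again.
+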
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0,
+ maybe_call_mfa/4]).
+
+%% gen_server callbacks
+-export([init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+-record(state, {
+ modules = #{} :: #{atom() => boolean()}
+}).
+
+%% API
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+maybe_call_mfa(Module, Function, Args, Default) ->
+ gen_server:call(?MODULE, {maybe_call_mfa, {Module, Function, Args, Default}}).
+
+%% gen_server callbacks
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call({maybe_call_mfa, {Mod, _F, _A, _D} = MFA}, _From, #state{modules = ModuleMap} = State0) ->
+ Value = maps:get(Mod, ModuleMap, true),
+ {ok, Reply, State1} = handle_maybe_call_mfa(Value, MFA, State0),
+ {reply, Reply, State1};
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% Internal functions
+
+handle_maybe_call_mfa(false, {_M, _F, _A, Default}, State) ->
+ {ok, Default, State};
+handle_maybe_call_mfa(true, {Module, Function, Args, Default}, State) ->
+ try
+ Reply = erlang:apply(Module, Function, Args),
+ {ok, Reply, State}
+ catch
+ error:undef ->
+ handle_maybe_call_mfa_error(Module, Default, State);
+ Err:Reason ->
+ rabbit_log:error("Calling ~p:~p failed: ~p:~p~n",
+ [Module, Function, Err, Reason]),
+ handle_maybe_call_mfa_error(Module, Default, State)
+ end.
+
+handle_maybe_call_mfa_error(Module, Default, #state{modules = ModuleMap0} = State0) ->
+ ModuleMap1 = maps:put(Module, false, ModuleMap0),
+ State1 = State0#state{modules = ModuleMap1},
+ {ok, Default, State1}.
diff --git a/deps/rabbit/src/gatherer.erl b/deps/rabbit/src/gatherer.erl
new file mode 100644
index 0000000000..2b46ec02b1
--- /dev/null
+++ b/deps/rabbit/src/gatherer.erl
@@ -0,0 +1,151 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(gatherer).
+
+%% Gatherer is a queue which has producer and consumer processes. Before producers
+%% push items to the queue using gatherer:in/2 they need to declare their intent
+%% to do so with gatherer:fork/1. When a publisher's work is done, it states so
+%% using gatherer:finish/1.
+%%
+%% Consumers pop items off the queue with gatherer:out/1. If the queue is empty
+%% and there are producers that haven't finished working, the caller is blocked
+%% until an item is available. If there are no active producers, gatherer:out/1
+%% immediately returns 'empty'.
+%%
+%% This module is primarily used to collect results from asynchronous tasks
+%% running in a worker pool, e.g. when recovering bindings or rebuilding
+%% message store indices.
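+%%
+%% An illustrative sketch of the call sequence (values are made up):
+%%
+%%   {ok, Gatherer} = gatherer:start_link(),
+%%   ok = gatherer:fork(Gatherer),            %% a producer announces itself
+%%   ok = gatherer:in(Gatherer, item1),       %% the producer pushes an item
+%%   ok = gatherer:finish(Gatherer),          %% the producer is done
+%%   {value, item1} = gatherer:out(Gatherer),
+%%   empty = gatherer:out(Gatherer),          %% no items and no active producers
+%%   ok = gatherer:stop(Gatherer).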
+
+-behaviour(gen_server2).
+
+-export([start_link/0, stop/1, fork/1, finish/1, in/2, sync_in/2, out/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+
+%%----------------------------------------------------------------------------
+
+-record(gstate, { forks, values, blocked }).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server2:start_link(?MODULE, [], [{timeout, infinity}]).
+
+-spec stop(pid()) -> 'ok'.
+
+stop(Pid) ->
+ unlink(Pid),
+ gen_server2:call(Pid, stop, infinity).
+
+-spec fork(pid()) -> 'ok'.
+
+fork(Pid) ->
+ gen_server2:call(Pid, fork, infinity).
+
+-spec finish(pid()) -> 'ok'.
+
+finish(Pid) ->
+ gen_server2:cast(Pid, finish).
+
+-spec in(pid(), any()) -> 'ok'.
+
+in(Pid, Value) ->
+ gen_server2:cast(Pid, {in, Value}).
+
+-spec sync_in(pid(), any()) -> 'ok'.
+
+sync_in(Pid, Value) ->
+ gen_server2:call(Pid, {in, Value}, infinity).
+
+-spec out(pid()) -> {'value', any()} | 'empty'.
+
+out(Pid) ->
+ gen_server2:call(Pid, out, infinity).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, #gstate { forks = 0, values = queue:new(), blocked = queue:new() },
+ hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State};
+
+handle_call(fork, _From, State = #gstate { forks = Forks }) ->
+ {reply, ok, State #gstate { forks = Forks + 1 }, hibernate};
+
+handle_call({in, Value}, From, State) ->
+ {noreply, in(Value, From, State), hibernate};
+
+handle_call(out, From, State = #gstate { forks = Forks,
+ values = Values,
+ blocked = Blocked }) ->
+ case queue:out(Values) of
+ {empty, _} when Forks == 0 ->
+ {reply, empty, State, hibernate};
+ {empty, _} ->
+ {noreply, State #gstate { blocked = queue:in(From, Blocked) },
+ hibernate};
+ {{value, {PendingIn, Value}}, NewValues} ->
+ reply(PendingIn, ok),
+ {reply, {value, Value}, State #gstate { values = NewValues },
+ hibernate}
+ end;
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+handle_cast(finish, State = #gstate { forks = Forks, blocked = Blocked }) ->
+ NewForks = Forks - 1,
+ NewBlocked = case NewForks of
+ 0 -> _ = [gen_server2:reply(From, empty) ||
+ From <- queue:to_list(Blocked)],
+ queue:new();
+ _ -> Blocked
+ end,
+ {noreply, State #gstate { forks = NewForks, blocked = NewBlocked },
+ hibernate};
+
+handle_cast({in, Value}, State) ->
+ {noreply, in(Value, undefined, State), hibernate};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Reason, State) ->
+ State.
+
+%%----------------------------------------------------------------------------
+
+in(Value, From, State = #gstate { values = Values, blocked = Blocked }) ->
+ case queue:out(Blocked) of
+ {empty, _} ->
+ State #gstate { values = queue:in({From, Value}, Values) };
+ {{value, PendingOut}, NewBlocked} ->
+ reply(From, ok),
+ gen_server2:reply(PendingOut, {value, Value}),
+ State #gstate { blocked = NewBlocked }
+ end.
+
+reply(undefined, _Reply) -> ok;
+reply(From, Reply) -> gen_server2:reply(From, Reply).
diff --git a/deps/rabbit/src/gm.erl b/deps/rabbit/src/gm.erl
new file mode 100644
index 0000000000..af24a2958a
--- /dev/null
+++ b/deps/rabbit/src/gm.erl
@@ -0,0 +1,1650 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(gm).
+
+%% Guaranteed Multicast
+%% ====================
+%%
+%% This module provides the ability to create named groups of
+%% processes to which members can be dynamically added and removed,
+%% and for messages to be broadcast within the group that are
+%% guaranteed to reach all members of the group during the lifetime of
+%% the message. The lifetime of a message is defined as being, at a
+%% minimum, the time from which the message is first sent to any
+%% member of the group, up until the time at which it is known by the
+%% member who published the message that the message has reached all
+%% group members.
+%%
+%% The guarantee given is that provided a message, once sent, makes it
+%% to members who do not all leave the group, the message will
+%% continue to propagate to all group members.
+%%
+%% Another way of stating the guarantee is that if member P publishes
+%% messages m and m', then for all members P', if P' is a member of
+%% the group prior to the publication of m, and P' receives m', then
+%% P' will receive m.
+%%
+%% Note that only local-ordering is enforced: i.e. if member P sends
+%% message m and then message m', then for-all members P', if P'
+%% receives m and m', then they will receive m' after m. Causality
+%% ordering is _not_ enforced. I.e. if member P receives message m
+%% and as a result publishes message m', there is no guarantee that
+%% other members P' will receive m before m'.
+%%
+%%
+%% API Use
+%% -------
+%%
+%% Mnesia must be started. Use the idempotent create_tables/0 function
+%% to create the tables required.
+%%
+%% start_link/3
+%% Provide the group name, the callback module name, and any arguments
+%% you wish to be passed into the callback module's functions. The
+%% joined/2 function will be called when we have joined the group,
+%% with the arguments passed to start_link and a list of the current
+%% members of the group. See the callbacks specs and the comments
+%% below for further details of the callback functions.
+%%
+%% leave/1
+%% Provide the Pid. Removes the Pid from the group. The callback
+%% handle_terminate/2 function will be called.
+%%
+%% broadcast/2
+%% Provide the Pid and a Message. The message will be sent to all
+%% members of the group as per the guarantees given above. This is a
+%% cast and the function call will return immediately. There is no
+%% guarantee that the message will reach any member of the group.
+%%
+%% confirmed_broadcast/2
+%% Provide the Pid and a Message. As per broadcast/2 except that this
+%% is a call, not a cast, and only returns 'ok' once the Message has
+%% reached every member of the group. Do not call
+%% confirmed_broadcast/2 directly from the callback module otherwise
+%% you will deadlock the entire group.
+%%
+%% info/1
+%% Provide the Pid. Returns a proplist with various facts, including
+%% the group name and the current group members.
+%%
+%% validate_members/2
+%% Check whether a given member list agrees with the chosen member's
+%% view. Any differences will be communicated via the members_changed
+%% callback. If there are no differences then there will be no reply.
+%% Note that members will not necessarily share the same view.
+%%
+%% forget_group/1
+%% Provide the group name. Removes its mnesia record. Makes no attempt
+%% to ensure the group is empty.
+%%
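+%% A rough usage sketch following the descriptions above (the callback module
+%% name and the argument shapes are illustrative only; consult the exported
+%% specs below for the exact signatures):
+%%
+%%   gm:create_tables(),
+%%   {ok, Pid} = gm:start_link(my_group, my_gm_callback, Args),
+%%   ok = gm:broadcast(Pid, {event, 1}),           %% cast; returns immediately
+%%   ok = gm:confirmed_broadcast(Pid, {event, 2}), %% returns once every member has it
+%%   Info = gm:info(Pid),
+%%   gm:leave(Pid).
+%%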
+%% Implementation Overview
+%% -----------------------
+%%
+%% One possible means of implementation would be a fan-out from the
+%% sender to every member of the group. This would require that the
+%% group is fully connected, and, in the event that the original
+%% sender of the message disappears from the group before the message
+%% has made it to every member of the group, raises questions as to
+%% who is responsible for sending on the message to new group members.
+%% In particular, the issue is with [ Pid ! Msg || Pid <- Members ] -
+%% if the sender dies part way through, who is responsible for
+%% ensuring that the remaining Members receive the Msg? In the event
+%% that within the group, messages sent are broadcast from a subset of
+%% the members, the fan-out arrangement has the potential to
+%% substantially impact the CPU and network workload of such members,
+%% as such members would have to accommodate the cost of sending each
+%% message to every group member.
+%%
+%% Instead, if the members of the group are arranged in a chain, then
+%% it becomes easier to reason about who within the group has received
+%% each message and who has not. It eases issues of responsibility: in
+%% the event of a group member disappearing, the nearest upstream
+%% member of the chain is responsible for ensuring that messages
+%% continue to propagate down the chain. It also results in equal
+%% distribution of sending and receiving workload, even if all
+%% messages are being sent from just a single group member. This
+%% configuration has the further advantage that it is not necessary
+%% for every group member to know of every other group member, and
+%% even that a group member does not have to be accessible from all
+%% other group members.
+%%
+%% Performance is kept high by permitting pipelining and all
+%% communication between joined group members is asynchronous. In the
+%% chain A -> B -> C -> D, if A sends a message to the group, it will
+%% not directly contact C or D. However, it must know that D receives
+%% the message (in addition to B and C) before it can consider the
+%% message fully sent. A simplistic implementation would require that
+%% D replies to C, C replies to B and B then replies to A. This would
+%% result in a propagation delay of twice the length of the chain. It
+%% would also require, in the event of the failure of C, that D knows
+%% to directly contact B and issue the necessary replies. Instead, the
+%% chain forms a ring: D sends the message on to A: D does not
+%% distinguish A as the sender, merely as the next member (downstream)
+%% within the chain (which has now become a ring). When A receives
+%% from D messages that A sent, it knows that all members have
+%% received the message. However, the message is not dead yet: if C
+%% died as B was sending to C, then B would need to detect the death
+%% of C and forward the message on to D instead: thus every node has
+%% to remember every message published until it is told that it can
+%% forget about the message. This is essential not just for dealing
+%% with failure of members, but also for the addition of new members.
+%%
+%% Thus once A receives the message back again, it then sends to B an
+%% acknowledgement for the message, indicating that B can now forget
+%% about the message. B does so, and forwards the ack to C. C forgets
+%% the message, and forwards the ack to D, which forgets the message
+%% and finally forwards the ack back to A. At this point, A takes no
+%% further action: the message and its acknowledgement have made it to
+%% every member of the group. The message is now dead, and any new
+%% member joining the group at this point will not receive the
+%% message.
+%%
+%% We therefore have two roles:
+%%
+%% 1. The sender, who upon receiving their own messages back, must
+%% then send out acknowledgements, and upon receiving their own
+%% acknowledgements back perform no further action.
+%%
+%% 2. The other group members who upon receiving messages and
+%% acknowledgements must update their own internal state accordingly
+%% (the sending member must also do this in order to be able to
+%% accommodate failures), and forwards messages on to their downstream
+%% neighbours.
+%%
+%%
+%% Implementation: It gets trickier
+%% --------------------------------
+%%
+%% Chain A -> B -> C -> D
+%%
+%% A publishes a message which B receives. A now dies. B and D will
+%% detect the death of A, and will link up, thus the chain is now B ->
+%% C -> D. B forwards A's message on to C, who forwards it to D, who
+%% forwards it to B. Thus B is now responsible for A's messages - both
+%% publications and acknowledgements that were in flight at the point
+%% at which A died. Even worse is that this is transitive: after B
+%% forwards A's message to C, B dies as well. Now C is not only
+%% responsible for B's in-flight messages, but is also responsible for
+%% A's in-flight messages.
+%%
+%% Lemma 1: A member can only determine which dead members they have
+%% inherited responsibility for if there is a total ordering on the
+%% conflicting additions and subtractions of members from the group.
+%%
+%% Consider the simultaneous death of B and addition of B' that
+%% transitions a chain from A -> B -> C to A -> B' -> C. Either B' or
+%% C is responsible for in-flight messages from B. It is easy to
+%% ensure that at least one of them thinks they have inherited B, but
+%% if we do not ensure that exactly one of them inherits B, then we
+%% could have B' converting publishes to acks, which then will crash C
+%% as C does not believe it has issued acks for those messages.
+%%
+%% More complex scenarios are easy to concoct: A -> B -> C -> D -> E
+%% becoming A -> C' -> E. Who has inherited which of B, C and D?
+%%
+%% However, for non-conflicting membership changes, only a partial
+%% ordering is required. For example, A -> B -> C becoming A -> A' ->
+%% B. The addition of A', between A and B can have no conflicts with
+%% the death of C: it is clear that A has inherited C's messages.
+%%
+%% For ease of implementation, we adopt the simple solution, of
+%% imposing a total order on all membership changes.
+%%
+%% On the death of a member, it is ensured the dead member's
+%% neighbours become aware of the death, and the upstream neighbour
+%% now sends to its new downstream neighbour its state, including the
+%% messages pending acknowledgement. The downstream neighbour can then
+%% use this to calculate which publishes and acknowledgements it has
+%% missed out on, due to the death of its old upstream. Thus the
+%% downstream can catch up, and continues the propagation of messages
+%% through the group.
+%%
+%% Lemma 2: When a member is joining, it must synchronously
+%% communicate with its upstream member in order to receive its
+%% starting state atomically with its addition to the group.
+%%
+%% New members must start with the same state as their nearest
+%% upstream neighbour. This ensures that they are not surprised by
+%% acknowledgements they are sent, and that, should their downstream
+%% neighbour die, they are able to send the correct state to their new
+%% downstream neighbour to ensure it can catch up. Thus in the
+%% transition A -> B -> C becomes A -> A' -> B -> C becomes A -> A' ->
+%% C, A' must start with the state of A, so that it can send C the
+%% correct state when B dies, allowing C to detect any missed
+%% messages.
+%%
+%% If A' starts by adding itself to the group membership, A could then
+%% die, without A' having received the necessary state from A. This
+%% would leave A' responsible for in-flight messages from A, but
+%% having the least knowledge of all, of those messages. Thus A' must
+%% start by synchronously calling A, which then immediately sends A'
+%% back its state. A then adds A' to the group. If A dies at this
+%% point then A' will be able to see this (as A' will fail to appear
+%% in the group membership), and thus A' will ignore the state it
+%% receives from A, and will simply repeat the process, trying to now
+%% join downstream from some other member. This ensures that should
+%% the upstream die as soon as the new member has been joined, the new
+%% member is guaranteed to receive the correct state, allowing it to
+%% correctly process messages inherited due to the death of its
+%% upstream neighbour.
+%%
+%% The canonical definition of the group membership is held by a
+%% distributed database. Whilst this allows the total ordering of
+%% changes to be achieved, it is nevertheless undesirable to have to
+%% query this database for the current view, upon receiving each
+%% message. Instead, we wish for members to be able to cache a view of
+%% the group membership, which then requires a cache invalidation
+%% mechanism. Each member maintains its own view of the group
+%% membership. Thus when the group's membership changes, members may
+%% need to become aware of such changes in order to be able to
+%% accurately process messages they receive. Because of the
+%% requirement of a total ordering of conflicting membership changes,
+%% it is not possible to use the guaranteed broadcast mechanism to
+%% communicate these changes: to achieve the necessary ordering, it
+%% would be necessary for such messages to be published by exactly one
+%% member, which cannot be guaranteed given that such a member could
+%% die.
+%%
+%% The total ordering we enforce on membership changes gives rise to a
+%% view version number: every change to the membership creates a
+%% different view, and the total ordering permits a simple
+%% monotonically increasing view version number.
+%%
+%% Lemma 3: If a message is sent from a member that holds view version
+%% N, it can be correctly processed by any member receiving the
+%% message with a view version >= N.
+%%
+%% Initially, let us suppose that each view contains the ordering of
+%% every member that was ever part of the group. Dead members are
+%% marked as such. Thus we have a ring of members, some of which are
+%% dead, and are thus inherited by the nearest alive downstream
+%% member.
+%%
+%% In the chain A -> B -> C, all three members initially have view
+%% version 1, which reflects reality. B publishes a message, which is
+%% forwarded by C to A. B now dies, which A notices very quickly. Thus A
+%% updates the view, creating version 2. It now forwards B's
+%% publication, sending that message to its new downstream neighbour,
+%% C. This happens before C is aware of the death of B. C must become
+%% aware of the view change before it interprets the message it has
+%% received; otherwise it will fail to learn of the death of B, and
+%% thus will not realise it has inherited B's messages (and will
+%% likely crash).
+%%
+%% Thus very simply, we have that each subsequent view contains more
+%% information than the preceding view.
+%%
+%% However, to avoid the views growing indefinitely, we need to be
+%% able to delete members which have died _and_ for which no messages
+%% are in-flight. This requires that upon inheriting a dead member, we
+%% know the last publication sent by the dead member (this is easy: we
+%% inherit a member because we are the nearest downstream member, which
+%% implies that we know at least as much as everyone else about the
+%% publications of the dead member), and we know the earliest message
+%% for which the acknowledgement is still in flight.
+%%
+%% In the chain A -> B -> C, when B dies, A will send to C its state
+%% (as C is the new downstream from A), allowing C to calculate which
+%% messages it has missed out on (described above). At this point, C
+%% also inherits B's messages. If that state from A also includes the
+%% last message published by B for which an acknowledgement has been
+%% seen, then C knows exactly which further acknowledgements it must
+%% receive (also including issuing acknowledgements for publications
+%% still in-flight that it receives), after which it is known there
+%% are no more messages in flight for B, thus all evidence that B was
+%% ever part of the group can be safely removed from the canonical
+%% group membership.
+%%
+%% Thus, for every message that a member sends, it includes with that
+%% message its view version. When a member receives a message it will
+%% update its view from the canonical copy, should its view be older
+%% than the view version included in the message it has received.
+%%
+%% The state held by each member therefore includes the messages from
+%% each publisher pending acknowledgement, the last publication seen
+%% from that publisher, and the last acknowledgement from that
+%% publisher. In the case of the member's own publications, or of
+%% inherited members, this last-acknowledgement-seen state indicates
+%% the last acknowledgement retired, rather than sent.
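+%%
+%% Illustratively (this is a sketch of the record shape, not normative
+%% documentation), the per-publisher state corresponds to the #member{}
+%% record defined further down:
+%%
+%%   #member { pending_ack = queue of {PubNum, Msg} awaiting acknowledgement,
+%%             last_pub    = highest publication number seen (-1 initially),
+%%             last_ack    = highest acknowledgement seen (-1 initially) }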
+%%
+%%
+%% Proof sketch
+%% ------------
+%%
+%% We need to prove that with the provided operational semantics, we
+%% can never reach a state that is not well formed from a well-formed
+%% starting state.
+%%
+%% Operational semantics (small step): straightforward message
+%% sending, process monitoring, state updates.
+%%
+%% Well formed state: dead members inherited by exactly one non-dead
+%% member; for every entry in anyone's pending-acks, either (the
+%% publication of the message is in-flight downstream from the member
+%% and upstream from the publisher) or (the acknowledgement of the
+%% message is in-flight downstream from the publisher and upstream
+%% from the member).
+%%
+%% Proof by induction on the applicable operational semantics.
+%%
+%%
+%% Related work
+%% ------------
+%%
+%% The ring configuration and double traversal of messages around the
+%% ring is similar (though developed independently) to the LCR
+%% protocol by [Levy 2008]. However, LCR differs in several
+%% ways. Firstly, by using vector clocks, it enforces a total order of
+%% message delivery, which is unnecessary for our purposes. More
+%% significantly, it is built on top of a "group communication system"
+%% which performs the group management functions, taking
+%% responsibility away from the protocol as to how to cope with safely
+%% adding and removing members. When membership changes do occur, the
+%% protocol stipulates that every member must perform communication
+%% with every other member of the group, to ensure all outstanding
+%% deliveries complete, before the entire group transitions to the new
+%% view. This, in total, requires two sets of all-to-all synchronous
+%% communications.
+%%
+%% This is not only rather inefficient, but also does not explain what
+%% happens upon the failure of a member during this process. It does
+%% though entirely avoid the need for inheritance of responsibility of
+%% dead members that our protocol incorporates.
+%%
+%% In [Marandi et al 2010], a Paxos-based protocol is described. This
+%% work explicitly focuses on the efficiency of communication. LCR
+%% (and our protocol too) are more efficient, but at the cost of
+%% higher latency. The Ring-Paxos protocol is itself built on top of
+%% IP-multicast, which rules it out for many applications where
+%% point-to-point communication is all that is available. They also
+%% have an excellent related work section which I really ought to
+%% read...
+%%
+%%
+%% [Levy 2008] The Complexity of Reliable Distributed Storage, 2008.
+%% [Marandi et al 2010] Ring Paxos: A High-Throughput Atomic Broadcast
+%% Protocol
+
+
+-behaviour(gen_server2).
+
+-export([create_tables/0, start_link/4, leave/1, broadcast/2, broadcast/3,
+ confirmed_broadcast/2, info/1, validate_members/2, forget_group/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, prioritise_info/3]).
+
+%% For INSTR_MOD callbacks
+-export([call/3, cast/2, monitor/1, demonitor/1]).
+
+-export([table_definitions/0]).
+
+-define(GROUP_TABLE, gm_group).
+-define(MAX_BUFFER_SIZE, 100000000). %% 100MB
+-define(BROADCAST_TIMER, 25).
+-define(FORCE_GC_TIMER, 250).
+-define(VERSION_START, 0).
+-define(SETS, ordsets).
+
+-record(state,
+ { self,
+ left,
+ right,
+ group_name,
+ module,
+ view,
+ pub_count,
+ members_state,
+ callback_args,
+ confirms,
+ broadcast_buffer,
+ broadcast_buffer_sz,
+ broadcast_timer,
+ force_gc_timer,
+ txn_executor,
+ shutting_down
+ }).
+
+-record(gm_group, { name, version, members }).
+
+-record(view_member, { id, aliases, left, right }).
+
+-record(member, { pending_ack, last_pub, last_ack }).
+
+-define(TABLE, {?GROUP_TABLE, [{record_name, gm_group},
+ {attributes, record_info(fields, gm_group)}]}).
+-define(TABLE_MATCH, {match, #gm_group { _ = '_' }}).
+
+-define(TAG, '$gm').
+
+-export_type([group_name/0]).
+
+-type group_name() :: any().
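+%% A txn_fun() is supplied by the caller of start_link/4; it is expected
+%% to run the given fun (which reads and writes the group membership)
+%% inside a transaction - in RabbitMQ, typically a Mnesia transaction -
+%% and return the fun's result.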
+-type txn_fun() :: fun((fun(() -> any())) -> any()).
+
+%% The joined, members_changed and handle_msg callbacks can all return
+%% any of the following terms:
+%%
+%% 'ok' - the callback function returns normally
+%%
+%% {'stop', Reason} - the callback indicates the member should stop
+%% with reason Reason and should leave the group.
+%%
+%% {'become', Module, Args} - the callback indicates that the callback
+%% module should be changed to Module and that the callback functions
+%% should now be passed the arguments Args. This allows the callback
+%% module to be dynamically changed.
+
+%% Called when we've successfully joined the group. Supplied with Args
+%% provided in start_link, plus current group members.
+-callback joined(Args :: term(), Members :: [pid()]) ->
+ ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
+
+%% Supplied with Args provided in start_link, the list of new members
+%% and the list of members previously known to us that have since
+%% died. Note that if a member joins and dies very quickly, it's
+%% possible that we will never see that member appear in either births
+%% or deaths. However we are guaranteed that (1) we will see a member
+%% joining either in the births here, or in the members passed to
+%% joined/2 before receiving any messages from it; and (2) we will not
+%% see members die that we have not seen born (or supplied in the
+%% members to joined/2).
+-callback members_changed(Args :: term(),
+ Births :: [pid()], Deaths :: [pid()]) ->
+ ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
+
+%% Supplied with Args provided in start_link, the sender, and the
+%% message. This does get called for messages injected by this member;
+%% however, in such cases, there is no special significance to this
+%% invocation: it does not indicate that the message has made it to
+%% any other members, let alone all other members.
+-callback handle_msg(Args :: term(), From :: pid(), Message :: term()) ->
+ ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
+
+%% Called on gm member termination as per rules in gen_server, with
+%% the Args provided in start_link plus the termination Reason.
+-callback handle_terminate(Args :: term(), Reason :: term()) ->
+ ok | term().
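+
+%% As an illustration only (not part of this module), a minimal callback
+%% module that accepts every event with the simplest return value could
+%% look like the following sketch; the module name is hypothetical:
+%%
+%%   -module(my_gm_callback).
+%%   -export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
+%%
+%%   joined(_Args, _Members)                  -> ok.
+%%   members_changed(_Args, _Births, _Deaths) -> ok.
+%%   handle_msg(_Args, _From, _Msg)           -> ok.
+%%   handle_terminate(_Args, _Reason)         -> ok.
+%%
+%% Such a member would be started with start_link/4 and could then use
+%% broadcast/2 or confirmed_broadcast/2 to publish to the group.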
+
+-spec create_tables() -> 'ok' | {'aborted', any()}.
+
+create_tables() ->
+ create_tables([?TABLE]).
+
+create_tables([]) ->
+ ok;
+create_tables([{Table, Attributes} | Tables]) ->
+ case mnesia:create_table(Table, Attributes) of
+ {atomic, ok} -> create_tables(Tables);
+ {aborted, {already_exists, Table}} -> create_tables(Tables);
+ Err -> Err
+ end.
+
+table_definitions() ->
+ {Name, Attributes} = ?TABLE,
+ [{Name, [?TABLE_MATCH | Attributes]}].
+
+-spec start_link(group_name(), atom(), any(), txn_fun()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(GroupName, Module, Args, TxnFun) ->
+ gen_server2:start_link(?MODULE, [GroupName, Module, Args, TxnFun],
+ [{spawn_opt, [{fullsweep_after, 0}]}]).
+
+-spec leave(pid()) -> 'ok'.
+
+leave(Server) ->
+ gen_server2:cast(Server, leave).
+
+-spec broadcast(pid(), any()) -> 'ok'.
+
+broadcast(Server, Msg) -> broadcast(Server, Msg, 0).
+
+broadcast(Server, Msg, SizeHint) ->
+ gen_server2:cast(Server, {broadcast, Msg, SizeHint}).
+
+-spec confirmed_broadcast(pid(), any()) -> 'ok'.
+
+confirmed_broadcast(Server, Msg) ->
+ gen_server2:call(Server, {confirmed_broadcast, Msg}, infinity).
+
+-spec info(pid()) -> rabbit_types:infos().
+
+info(Server) ->
+ gen_server2:call(Server, info, infinity).
+
+-spec validate_members(pid(), [pid()]) -> 'ok'.
+
+validate_members(Server, Members) ->
+ gen_server2:cast(Server, {validate_members, Members}).
+
+-spec forget_group(group_name()) -> 'ok'.
+
+forget_group(GroupName) ->
+ {atomic, ok} = mnesia:sync_transaction(
+ fun () ->
+ mnesia:delete({?GROUP_TABLE, GroupName})
+ end),
+ ok.
+
+init([GroupName, Module, Args, TxnFun]) ->
+ put(process_name, {?MODULE, GroupName}),
+ Self = make_member(GroupName),
+ gen_server2:cast(self(), join),
+ {ok, #state { self = Self,
+ left = {Self, undefined},
+ right = {Self, undefined},
+ group_name = GroupName,
+ module = Module,
+ view = undefined,
+ pub_count = -1,
+ members_state = undefined,
+ callback_args = Args,
+ confirms = queue:new(),
+ broadcast_buffer = [],
+ broadcast_buffer_sz = 0,
+ broadcast_timer = undefined,
+ force_gc_timer = undefined,
+ txn_executor = TxnFun,
+ shutting_down = false }}.
+
+
+handle_call({confirmed_broadcast, _Msg}, _From,
+ State = #state { shutting_down = {true, _} }) ->
+ reply(shutting_down, State);
+
+handle_call({confirmed_broadcast, _Msg}, _From,
+ State = #state { members_state = undefined }) ->
+ reply(not_joined, State);
+
+handle_call({confirmed_broadcast, Msg}, _From,
+ State = #state { self = Self,
+ right = {Self, undefined},
+ module = Module,
+ callback_args = Args }) ->
+ handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg),
+ ok, State});
+
+handle_call({confirmed_broadcast, Msg}, From, State) ->
+ {Result, State1 = #state { pub_count = PubCount, confirms = Confirms }} =
+ internal_broadcast(Msg, 0, State),
+ Confirms1 = queue:in({PubCount, From}, Confirms),
+ handle_callback_result({Result, flush_broadcast_buffer(
+ State1 #state { confirms = Confirms1 })});
+
+handle_call(info, _From,
+ State = #state { members_state = undefined }) ->
+ reply(not_joined, State);
+
+handle_call(info, _From, State = #state { group_name = GroupName,
+ module = Module,
+ view = View }) ->
+ reply([{group_name, GroupName},
+ {module, Module},
+ {group_members, get_pids(alive_view_members(View))}], State);
+
+handle_call({add_on_right, _NewMember}, _From,
+ State = #state { members_state = undefined }) ->
+ reply(not_ready, State);
+
+handle_call({add_on_right, NewMember}, _From,
+ State = #state { self = Self,
+ group_name = GroupName,
+ members_state = MembersState,
+ txn_executor = TxnFun }) ->
+ try
+ Group = record_new_member_in_group(
+ NewMember, Self, GroupName, TxnFun),
+ View1 = group_to_view(check_membership(Self, Group)),
+ MembersState1 = remove_erased_members(MembersState, View1),
+ ok = send_right(NewMember, View1,
+ {catchup, Self, prepare_members_state(MembersState1)}),
+ {Result, State1} = change_view(View1, State #state {
+ members_state = MembersState1 }),
+ handle_callback_result({Result, {ok, Group}, State1})
+ catch
+ lost_membership ->
+ {stop, shutdown, State}
+ end.
+
+%% add_on_right causes a catchup to be sent immediately from the left,
+%% so we can never see this from the left neighbour. However, it's
+%% possible for the right neighbour to send us a check_neighbours
+%% immediately before that. We can't possibly handle it, but if we're
+%% in this state we know a catchup is coming imminently anyway. So
+%% just ignore it.
+handle_cast({?TAG, _ReqVer, check_neighbours},
+ State = #state { members_state = undefined }) ->
+ noreply(State);
+
+handle_cast({?TAG, ReqVer, Msg},
+ State = #state { view = View,
+ self = Self,
+ members_state = MembersState,
+ group_name = GroupName }) ->
+ try
+ {Result, State1} =
+ case needs_view_update(ReqVer, View) of
+ true ->
+ View1 = group_to_view(
+ check_membership(Self,
+ dirty_read_group(GroupName))),
+ MemberState1 = remove_erased_members(MembersState, View1),
+ change_view(View1, State #state {
+ members_state = MemberState1 });
+ false -> {ok, State}
+ end,
+ handle_callback_result(
+ if_callback_success(
+ Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1))
+ catch
+ lost_membership ->
+ {stop, shutdown, State}
+ end;
+
+handle_cast({broadcast, _Msg, _SizeHint},
+ State = #state { shutting_down = {true, _} }) ->
+ noreply(State);
+
+handle_cast({broadcast, _Msg, _SizeHint},
+ State = #state { members_state = undefined }) ->
+ noreply(State);
+
+handle_cast({broadcast, Msg, _SizeHint},
+ State = #state { self = Self,
+ right = {Self, undefined},
+ module = Module,
+ callback_args = Args }) ->
+ handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg),
+ State});
+
+handle_cast({broadcast, Msg, SizeHint}, State) ->
+ {Result, State1} = internal_broadcast(Msg, SizeHint, State),
+ handle_callback_result({Result, maybe_flush_broadcast_buffer(State1)});
+
+handle_cast(join, State = #state { self = Self,
+ group_name = GroupName,
+ members_state = undefined,
+ module = Module,
+ callback_args = Args,
+ txn_executor = TxnFun }) ->
+ try
+ View = join_group(Self, GroupName, TxnFun),
+ MembersState =
+ case alive_view_members(View) of
+ [Self] -> blank_member_state();
+ _ -> undefined
+ end,
+ State1 = check_neighbours(State #state { view = View,
+ members_state = MembersState }),
+ handle_callback_result(
+ {Module:joined(Args, get_pids(all_known_members(View))), State1})
+ catch
+ lost_membership ->
+ {stop, shutdown, State}
+ end;
+
+handle_cast({validate_members, OldMembers},
+ State = #state { view = View,
+ module = Module,
+ callback_args = Args }) ->
+ NewMembers = get_pids(all_known_members(View)),
+ Births = NewMembers -- OldMembers,
+ Deaths = OldMembers -- NewMembers,
+ case {Births, Deaths} of
+ {[], []} -> noreply(State);
+ _ -> Result = Module:members_changed(Args, Births, Deaths),
+ handle_callback_result({Result, State})
+ end;
+
+handle_cast(leave, State) ->
+ {stop, normal, State}.
+
+
+handle_info(force_gc, State) ->
+ garbage_collect(),
+ noreply(State #state { force_gc_timer = undefined });
+
+handle_info(flush, State) ->
+ noreply(
+ flush_broadcast_buffer(State #state { broadcast_timer = undefined }));
+
+handle_info(timeout, State) ->
+ noreply(flush_broadcast_buffer(State));
+
+handle_info({'DOWN', _MRef, process, _Pid, _Reason},
+ State = #state { shutting_down =
+ {true, {shutdown, ring_shutdown}} }) ->
+ noreply(State);
+handle_info({'DOWN', MRef, process, _Pid, Reason},
+ State = #state { self = Self,
+ left = Left,
+ right = Right,
+ group_name = GroupName,
+ confirms = Confirms,
+ txn_executor = TxnFun }) ->
+ try
+ check_membership(GroupName),
+ Member = case {Left, Right} of
+ {{Member1, MRef}, _} -> Member1;
+ {_, {Member1, MRef}} -> Member1;
+ _ -> undefined
+ end,
+ case {Member, Reason} of
+ {undefined, _} ->
+ noreply(State);
+ {_, {shutdown, ring_shutdown}} ->
+ noreply(State);
+ _ ->
+ %% In the event of a partial partition we could see another member
+ %% go down and then remove them from Mnesia. While they can
+ %% recover from this they'd have to restart the queue - not
+ %% ideal. So let's sleep here briefly just in case this was caused
+ %% by a partial partition; in which case by the time we record the
+ %% member death in Mnesia we will probably be in a full
+ %% partition and will not be assassinating another member.
+ timer:sleep(100),
+ View1 = group_to_view(record_dead_member_in_group(Self,
+ Member, GroupName, TxnFun, true)),
+ handle_callback_result(
+ case alive_view_members(View1) of
+ [Self] -> maybe_erase_aliases(
+ State #state {
+ members_state = blank_member_state(),
+ confirms = purge_confirms(Confirms) },
+ View1);
+ _ -> change_view(View1, State)
+ end)
+ end
+ catch
+ lost_membership ->
+ {stop, shutdown, State}
+ end;
+handle_info(_, State) ->
+ %% Discard any unexpected messages, such as late replies from neighbour_call/2
+ %% TODO: For #gm_group{} related info messages, it could be worthwhile to
+ %% call change_view/2, as this might reflect an alteration in the gm group,
+ %% meaning we now need to update our state. See rabbitmq-server#914.
+ noreply(State).
+
+terminate(Reason, #state { module = Module, callback_args = Args }) ->
+ Module:handle_terminate(Args, Reason).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+prioritise_info(flush, _Len, _State) ->
+ 1;
+%% DOWN messages should not overtake initial catchups; if they do we
+%% will receive a DOWN we do not know what to do with.
+prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len,
+ #state { members_state = undefined }) ->
+ 0;
+%% We should not prioritise DOWN messages from our left since
+%% otherwise the DOWN can overtake any last activity from the left,
+%% causing that activity to be lost.
+prioritise_info({'DOWN', _MRef, process, LeftPid, _Reason}, _Len,
+ #state { left = {{_LeftVer, LeftPid}, _MRef2} }) ->
+ 0;
+%% But prioritise all other DOWNs - we want to make sure we are not
+%% sending activity into the void for too long because our right is
+%% down but we don't know it.
+prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len, _State) ->
+ 1;
+prioritise_info(_, _Len, _State) ->
+ 0.
+
+
+handle_msg(check_neighbours, State) ->
+ %% no-op - it's already been done by the calling handle_cast
+ {ok, State};
+
+handle_msg({catchup, Left, MembersStateLeft},
+ State = #state { self = Self,
+ left = {Left, _MRefL},
+ right = {Right, _MRefR},
+ view = View,
+ members_state = undefined }) ->
+ ok = send_right(Right, View, {catchup, Self, MembersStateLeft}),
+ MembersStateLeft1 = build_members_state(MembersStateLeft),
+ {ok, State #state { members_state = MembersStateLeft1 }};
+
+handle_msg({catchup, Left, MembersStateLeft},
+ State = #state { self = Self,
+ left = {Left, _MRefL},
+ view = View,
+ members_state = MembersState })
+ when MembersState =/= undefined ->
+ MembersStateLeft1 = build_members_state(MembersStateLeft),
+ AllMembers = lists:usort(maps:keys(MembersState) ++
+ maps:keys(MembersStateLeft1)),
+ {MembersState1, Activity} =
+ lists:foldl(
+ fun (Id, MembersStateActivity) ->
+ #member { pending_ack = PALeft, last_ack = LA } =
+ find_member_or_blank(Id, MembersStateLeft1),
+ with_member_acc(
+ fun (#member { pending_ack = PA } = Member, Activity1) ->
+ case is_member_alias(Id, Self, View) of
+ true ->
+ {_AcksInFlight, Pubs, _PA1} =
+ find_prefix_common_suffix(PALeft, PA),
+ {Member #member { last_ack = LA },
+ activity_cons(Id, pubs_from_queue(Pubs),
+ [], Activity1)};
+ false ->
+ {Acks, _Common, Pubs} =
+ find_prefix_common_suffix(PA, PALeft),
+ {Member,
+ activity_cons(Id, pubs_from_queue(Pubs),
+ acks_from_queue(Acks),
+ Activity1)}
+ end
+ end, Id, MembersStateActivity)
+ end, {MembersState, activity_nil()}, AllMembers),
+ handle_msg({activity, Left, activity_finalise(Activity)},
+ State #state { members_state = MembersState1 });
+
+handle_msg({catchup, _NotLeft, _MembersState}, State) ->
+ {ok, State};
+
+handle_msg({activity, Left, Activity},
+ State = #state { self = Self,
+ group_name = GroupName,
+ left = {Left, _MRefL},
+ view = View,
+ members_state = MembersState,
+ confirms = Confirms })
+ when MembersState =/= undefined ->
+ try
+ %% If we have to stop, do it as soon as possible so we avoid any ack confirmation.
+ %% Membership must be checked again by erase_members_in_group, as the
+ %% node can be marked as dead in the meantime
+ check_membership(GroupName),
+ {MembersState1, {Confirms1, Activity1}} =
+ calculate_activity(MembersState, Confirms, Activity, Self, View),
+ State1 = State #state { members_state = MembersState1,
+ confirms = Confirms1 },
+ Activity3 = activity_finalise(Activity1),
+ ok = maybe_send_activity(Activity3, State1),
+ {Result, State2} = maybe_erase_aliases(State1, View),
+ if_callback_success(
+ Result, fun activity_true/3, fun activity_false/3, Activity3, State2)
+ catch
+ lost_membership ->
+ {{stop, shutdown}, State}
+ end;
+
+handle_msg({activity, _NotLeft, _Activity}, State) ->
+ {ok, State}.
+
+
+noreply(State) ->
+ {noreply, ensure_timers(State), flush_timeout(State)}.
+
+reply(Reply, State) ->
+ {reply, Reply, ensure_timers(State), flush_timeout(State)}.
+
+ensure_timers(State) ->
+ ensure_force_gc_timer(ensure_broadcast_timer(State)).
+
+flush_timeout(#state{broadcast_buffer = []}) -> infinity;
+flush_timeout(_) -> 0.
+
+ensure_force_gc_timer(State = #state { force_gc_timer = TRef })
+ when is_reference(TRef) ->
+ State;
+ensure_force_gc_timer(State = #state { force_gc_timer = undefined }) ->
+ TRef = erlang:send_after(?FORCE_GC_TIMER, self(), force_gc),
+ State #state { force_gc_timer = TRef }.
+
+ensure_broadcast_timer(State = #state { broadcast_buffer = [],
+ broadcast_timer = undefined }) ->
+ State;
+ensure_broadcast_timer(State = #state { broadcast_buffer = [],
+ broadcast_timer = TRef }) ->
+ _ = erlang:cancel_timer(TRef),
+ State #state { broadcast_timer = undefined };
+ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) ->
+ TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush),
+ State #state { broadcast_timer = TRef };
+ensure_broadcast_timer(State) ->
+ State.
+
+internal_broadcast(Msg, SizeHint,
+ State = #state { self = Self,
+ pub_count = PubCount,
+ module = Module,
+ callback_args = Args,
+ broadcast_buffer = Buffer,
+ broadcast_buffer_sz = BufferSize }) ->
+ PubCount1 = PubCount + 1,
+ {Module:handle_msg(Args, get_pid(Self), Msg),
+ State #state { pub_count = PubCount1,
+ broadcast_buffer = [{PubCount1, Msg} | Buffer],
+ broadcast_buffer_sz = BufferSize + SizeHint}}.
+
+%% The Erlang distribution mechanism has an interesting quirk - it
+%% will kill the VM cold with "Absurdly large distribution output data
+%% buffer" if you attempt to send a message which serialises out to
+%% more than 2^31 bytes in size. It's therefore a very good idea to
+%% make sure that we don't exceed that size!
+%%
+%% Now, we could figure out the size of messages as they come in using
+%% size(term_to_binary(Msg)) or similar. The trouble is, that requires
+%% us to serialise the message only to throw the serialised form
+%% away. Hard to believe that's a sensible thing to do. So instead we
+%% accept a size hint from the application, via broadcast/3. This size
+%% hint can be the size of anything in the message which we expect
+%% could be large, and we just ignore the size of any small bits of
+%% the message term. Therefore MAX_BUFFER_SIZE is set somewhat
+%% conservatively at 100MB - but the buffer is only to allow us to
+%% buffer tiny messages anyway, so 100MB is plenty.
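+%%
+%% For example (illustrative only - the message shape and variable names
+%% below are hypothetical), a caller whose messages are dominated by a
+%% large payload binary might pass that binary's size as the hint:
+%%
+%%   ok = broadcast(GmPid, {deliver, Props, Payload}, byte_size(Payload))
+%%
+%% while small control messages can simply use broadcast/2, which
+%% supplies a size hint of 0.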
+
+maybe_flush_broadcast_buffer(State = #state{broadcast_buffer_sz = Size}) ->
+ case Size > ?MAX_BUFFER_SIZE of
+ true -> flush_broadcast_buffer(State);
+ false -> State
+ end.
+
+flush_broadcast_buffer(State = #state { broadcast_buffer = [] }) ->
+ State;
+flush_broadcast_buffer(State = #state { self = Self,
+ members_state = MembersState,
+ broadcast_buffer = Buffer,
+ pub_count = PubCount }) ->
+ [{PubCount, _Msg}|_] = Buffer, %% ASSERTION match on PubCount
+ Pubs = lists:reverse(Buffer),
+ Activity = activity_cons(Self, Pubs, [], activity_nil()),
+ ok = maybe_send_activity(activity_finalise(Activity), State),
+ MembersState1 = with_member(
+ fun (Member = #member { pending_ack = PA }) ->
+ PA1 = queue:join(PA, queue:from_list(Pubs)),
+ Member #member { pending_ack = PA1,
+ last_pub = PubCount }
+ end, Self, MembersState),
+ State #state { members_state = MembersState1,
+ broadcast_buffer = [],
+ broadcast_buffer_sz = 0 }.
+
+%% ---------------------------------------------------------------------------
+%% View construction and inspection
+%% ---------------------------------------------------------------------------
+
+needs_view_update(ReqVer, {Ver, _View}) -> Ver < ReqVer.
+
+view_version({Ver, _View}) -> Ver.
+
+is_member_alive({dead, _Member}) -> false;
+is_member_alive(_) -> true.
+
+is_member_alias(Self, Self, _View) ->
+ true;
+is_member_alias(Member, Self, View) ->
+ ?SETS:is_element(Member,
+ ((fetch_view_member(Self, View)) #view_member.aliases)).
+
+dead_member_id({dead, Member}) -> Member.
+
+store_view_member(VMember = #view_member { id = Id }, {Ver, View}) ->
+ {Ver, maps:put(Id, VMember, View)}.
+
+with_view_member(Fun, View, Id) ->
+ store_view_member(Fun(fetch_view_member(Id, View)), View).
+
+fetch_view_member(Id, {_Ver, View}) -> maps:get(Id, View).
+
+find_view_member(Id, {_Ver, View}) -> maps:find(Id, View).
+
+blank_view(Ver) -> {Ver, maps:new()}.
+
+alive_view_members({_Ver, View}) -> maps:keys(View).
+
+all_known_members({_Ver, View}) ->
+ maps:fold(
+ fun (Member, #view_member { aliases = Aliases }, Acc) ->
+ ?SETS:to_list(Aliases) ++ [Member | Acc]
+ end, [], View).
+
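+%% The list of alive members is tripled so that link_view/2 always sees
+%% each member as the Middle of a [Left, Middle, Right] triple, with the
+%% correct ring neighbours even at the wrap-around points; link_view/2
+%% stops as soon as it revisits a member already stored in the view.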
+group_to_view(#gm_group { members = Members, version = Ver }) ->
+ Alive = lists:filter(fun is_member_alive/1, Members),
+ [_|_] = Alive, %% ASSERTION - can't have all dead members
+ add_aliases(link_view(Alive ++ Alive ++ Alive, blank_view(Ver)), Members).
+
+link_view([Left, Middle, Right | Rest], View) ->
+ case find_view_member(Middle, View) of
+ error ->
+ link_view(
+ [Middle, Right | Rest],
+ store_view_member(#view_member { id = Middle,
+ aliases = ?SETS:new(),
+ left = Left,
+ right = Right }, View));
+ {ok, _} ->
+ View
+ end;
+link_view(_, View) ->
+ View.
+
+add_aliases(View, Members) ->
+ Members1 = ensure_alive_suffix(Members),
+ {EmptyDeadSet, View1} =
+ lists:foldl(
+ fun (Member, {DeadAcc, ViewAcc}) ->
+ case is_member_alive(Member) of
+ true ->
+ {?SETS:new(),
+ with_view_member(
+ fun (VMember =
+ #view_member { aliases = Aliases }) ->
+ VMember #view_member {
+ aliases = ?SETS:union(Aliases, DeadAcc) }
+ end, ViewAcc, Member)};
+ false ->
+ {?SETS:add_element(dead_member_id(Member), DeadAcc),
+ ViewAcc}
+ end
+ end, {?SETS:new(), View}, Members1),
+ 0 = ?SETS:size(EmptyDeadSet), %% ASSERTION
+ View1.
+
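+%% Rotate dead members off the tail of the members list round to the
+%% front, so that the fold in add_aliases/2 always finishes on an alive
+%% member: trailing dead members wrap around and are credited as aliases
+%% of the first alive member, satisfying the final assertion.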
+ensure_alive_suffix(Members) ->
+ queue:to_list(ensure_alive_suffix1(queue:from_list(Members))).
+
+ensure_alive_suffix1(MembersQ) ->
+ {{value, Member}, MembersQ1} = queue:out_r(MembersQ),
+ case is_member_alive(Member) of
+ true -> MembersQ;
+ false -> ensure_alive_suffix1(queue:in_r(Member, MembersQ1))
+ end.
+
+
+%% ---------------------------------------------------------------------------
+%% View modification
+%% ---------------------------------------------------------------------------
+
+join_group(Self, GroupName, TxnFun) ->
+ join_group(Self, GroupName, dirty_read_group(GroupName), TxnFun).
+
+join_group(Self, GroupName, {error, not_found}, TxnFun) ->
+ join_group(Self, GroupName,
+ prune_or_create_group(Self, GroupName, TxnFun), TxnFun);
+join_group(Self, _GroupName, #gm_group { members = [Self] } = Group, _TxnFun) ->
+ group_to_view(Group);
+join_group(Self, GroupName, #gm_group { members = Members } = Group, TxnFun) ->
+ case lists:member(Self, Members) of
+ true ->
+ group_to_view(Group);
+ false ->
+ case lists:filter(fun is_member_alive/1, Members) of
+ [] ->
+ join_group(Self, GroupName,
+ prune_or_create_group(Self, GroupName, TxnFun),
+ TxnFun);
+ Alive ->
+ Left = lists:nth(rand:uniform(length(Alive)), Alive),
+ Handler =
+ fun () ->
+ join_group(
+ Self, GroupName,
+ record_dead_member_in_group(Self,
+ Left, GroupName, TxnFun, false),
+ TxnFun)
+ end,
+ try
+ case neighbour_call(Left, {add_on_right, Self}) of
+ {ok, Group1} -> group_to_view(Group1);
+ not_ready -> join_group(Self, GroupName, TxnFun)
+ end
+ catch
+ exit:{R, _}
+ when R =:= noproc; R =:= normal; R =:= shutdown ->
+ Handler();
+ exit:{{R, _}, _}
+ when R =:= nodedown; R =:= shutdown ->
+ Handler()
+ end
+ end
+ end.
+
+dirty_read_group(GroupName) ->
+ case mnesia:dirty_read(?GROUP_TABLE, GroupName) of
+ [] -> {error, not_found};
+ [Group] -> Group
+ end.
+
+read_group(GroupName) ->
+ case mnesia:read({?GROUP_TABLE, GroupName}) of
+ [] -> {error, not_found};
+ [Group] -> Group
+ end.
+
+write_group(Group) -> mnesia:write(?GROUP_TABLE, Group, write), Group.
+
+prune_or_create_group(Self, GroupName, TxnFun) ->
+ TxnFun(
+ fun () ->
+ GroupNew = #gm_group { name = GroupName,
+ members = [Self],
+ version = get_version(Self) },
+ case read_group(GroupName) of
+ {error, not_found} ->
+ write_group(GroupNew);
+ Group = #gm_group { members = Members } ->
+ case lists:any(fun is_member_alive/1, Members) of
+ true -> Group;
+ false -> write_group(GroupNew)
+ end
+ end
+ end).
+
+record_dead_member_in_group(Self, Member, GroupName, TxnFun, Verify) ->
+ Fun =
+ fun () ->
+ try
+ Group = #gm_group { members = Members, version = Ver } =
+ case Verify of
+ true ->
+ check_membership(Self, read_group(GroupName));
+ false ->
+ check_group(read_group(GroupName))
+ end,
+ case lists:splitwith(
+ fun (Member1) -> Member1 =/= Member end, Members) of
+ {_Members1, []} -> %% not found - already recorded dead
+ Group;
+ {Members1, [Member | Members2]} ->
+ Members3 = Members1 ++ [{dead, Member} | Members2],
+ write_group(Group #gm_group { members = Members3,
+ version = Ver + 1 })
+ end
+ catch
+ lost_membership ->
+ %% The transaction must not be abruptly crashed, but
+ %% leave the gen_server to stop normally
+ {error, lost_membership}
+ end
+ end,
+ handle_lost_membership_in_txn(TxnFun, Fun).
+
+handle_lost_membership_in_txn(TxnFun, Fun) ->
+ case TxnFun(Fun) of
+ {error, lost_membership = T} ->
+ throw(T);
+ Any ->
+ Any
+ end.
+
+record_new_member_in_group(NewMember, Left, GroupName, TxnFun) ->
+ Fun =
+ fun () ->
+ try
+ Group = #gm_group { members = Members, version = Ver } =
+ check_membership(Left, read_group(GroupName)),
+ case lists:member(NewMember, Members) of
+ true ->
+ %% This avoids duplicates during partial partitions,
+ %% as inconsistent views might happen during them
+ rabbit_log:warning("(~p) GM avoiding duplicate of ~p",
+ [self(), NewMember]),
+ Group;
+ false ->
+ {Prefix, [Left | Suffix]} =
+ lists:splitwith(fun (M) -> M =/= Left end, Members),
+ write_group(Group #gm_group {
+ members = Prefix ++ [Left, NewMember | Suffix],
+ version = Ver + 1 })
+ end
+ catch
+ lost_membership ->
+ %% The transaction must not be abruptly crashed, but
+ %% leave the gen_server to stop normally
+ {error, lost_membership}
+ end
+ end,
+ handle_lost_membership_in_txn(TxnFun, Fun).
+
+erase_members_in_group(Self, Members, GroupName, TxnFun) ->
+ DeadMembers = [{dead, Id} || Id <- Members],
+ Fun =
+ fun () ->
+ try
+ Group = #gm_group { members = [_|_] = Members1, version = Ver } =
+ check_membership(Self, read_group(GroupName)),
+ case Members1 -- DeadMembers of
+ Members1 -> Group;
+ Members2 -> write_group(
+ Group #gm_group { members = Members2,
+ version = Ver + 1 })
+ end
+ catch
+ lost_membership ->
+ %% The transaction must not be abruptly crashed, but
+ %% leave the gen_server to stop normally
+ {error, lost_membership}
+ end
+ end,
+ handle_lost_membership_in_txn(TxnFun, Fun).
+
+maybe_erase_aliases(State = #state { self = Self,
+ group_name = GroupName,
+ members_state = MembersState,
+ txn_executor = TxnFun }, View) ->
+ #view_member { aliases = Aliases } = fetch_view_member(Self, View),
+ {Erasable, MembersState1}
+ = ?SETS:fold(
+ fun (Id, {ErasableAcc, MembersStateAcc} = Acc) ->
+ #member { last_pub = LP, last_ack = LA } =
+ find_member_or_blank(Id, MembersState),
+ case can_erase_view_member(Self, Id, LA, LP) of
+ true -> {[Id | ErasableAcc],
+ erase_member(Id, MembersStateAcc)};
+ false -> Acc
+ end
+ end, {[], MembersState}, Aliases),
+ View1 = case Erasable of
+ [] -> View;
+ _ -> group_to_view(
+ erase_members_in_group(Self, Erasable, GroupName, TxnFun))
+ end,
+ change_view(View1, State #state { members_state = MembersState1 }).
+
+can_erase_view_member(Self, Self, _LA, _LP) -> false;
+can_erase_view_member(_Self, _Id, N, N) -> true;
+can_erase_view_member(_Self, _Id, _LA, _LP) -> false.
+
+neighbour_cast(N, Msg) -> ?INSTR_MOD:cast(get_pid(N), Msg).
+neighbour_call(N, Msg) -> ?INSTR_MOD:call(get_pid(N), Msg, infinity).
+
+%% ---------------------------------------------------------------------------
+%% View monitoring and maintenance
+%% ---------------------------------------------------------------------------
+
+ensure_neighbour(_Ver, Self, {Self, undefined}, Self) ->
+ {Self, undefined};
+ensure_neighbour(Ver, Self, {Self, undefined}, RealNeighbour) ->
+ ok = neighbour_cast(RealNeighbour, {?TAG, Ver, check_neighbours}),
+ {RealNeighbour, maybe_monitor(RealNeighbour, Self)};
+ensure_neighbour(_Ver, _Self, {RealNeighbour, MRef}, RealNeighbour) ->
+ {RealNeighbour, MRef};
+ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) ->
+ true = ?INSTR_MOD:demonitor(MRef),
+ Msg = {?TAG, Ver, check_neighbours},
+ ok = neighbour_cast(RealNeighbour, Msg),
+ ok = case Neighbour of
+ Self -> ok;
+ _ -> neighbour_cast(Neighbour, Msg)
+ end,
+ {Neighbour, maybe_monitor(Neighbour, Self)}.
+
+maybe_monitor( Self, Self) -> undefined;
+maybe_monitor(Other, _Self) -> ?INSTR_MOD:monitor(get_pid(Other)).
+
+check_neighbours(State = #state { self = Self,
+ left = Left,
+ right = Right,
+ view = View,
+ broadcast_buffer = Buffer }) ->
+ #view_member { left = VLeft, right = VRight }
+ = fetch_view_member(Self, View),
+ Ver = view_version(View),
+ Left1 = ensure_neighbour(Ver, Self, Left, VLeft),
+ Right1 = ensure_neighbour(Ver, Self, Right, VRight),
+ Buffer1 = case Right1 of
+ {Self, undefined} -> [];
+ _ -> Buffer
+ end,
+ State1 = State #state { left = Left1, right = Right1,
+ broadcast_buffer = Buffer1 },
+ ok = maybe_send_catchup(Right, State1),
+ State1.
+
+maybe_send_catchup(Right, #state { right = Right }) ->
+ ok;
+maybe_send_catchup(_Right, #state { self = Self,
+ right = {Self, undefined} }) ->
+ ok;
+maybe_send_catchup(_Right, #state { members_state = undefined }) ->
+ ok;
+maybe_send_catchup(_Right, #state { self = Self,
+ right = {Right, _MRef},
+ view = View,
+ members_state = MembersState }) ->
+ send_right(Right, View,
+ {catchup, Self, prepare_members_state(MembersState)}).
+
+
+%% ---------------------------------------------------------------------------
+%% Catch_up delta detection
+%% ---------------------------------------------------------------------------
+
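+%% Given two queues of {PubNum, Msg} entries in publication order,
+%% returns {Prefix, Common, Suffix}: the entries found only at the front
+%% of A, the entries common to both queues, and the remaining entries of
+%% B.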
+find_prefix_common_suffix(A, B) ->
+ {Prefix, A1} = find_prefix(A, B, queue:new()),
+ {Common, Suffix} = find_common(A1, B, queue:new()),
+ {Prefix, Common, Suffix}.
+
+%% Returns the elements of A that occur before the first element of B,
+%% plus the remainder of A.
+find_prefix(A, B, Prefix) ->
+ case {queue:out(A), queue:out(B)} of
+ {{{value, Val}, _A1}, {{value, Val}, _B1}} ->
+ {Prefix, A};
+ {{empty, A1}, {{value, _A}, _B1}} ->
+ {Prefix, A1};
+ {{{value, {NumA, _MsgA} = Val}, A1},
+ {{value, {NumB, _MsgB}}, _B1}} when NumA < NumB ->
+ find_prefix(A1, B, queue:in(Val, Prefix));
+ {_, {empty, _B1}} ->
+ {A, Prefix} %% Prefix will be empty here
+ end.
+
+%% A should be a prefix of B. Returns the commonality plus the
+%% remainder of B.
+find_common(A, B, Common) ->
+ case {queue:out(A), queue:out(B)} of
+ {{{value, Val}, A1}, {{value, Val}, B1}} ->
+ find_common(A1, B1, queue:in(Val, Common));
+ {{empty, _A}, _} ->
+ {Common, B};
+ %% Drop value from B.
+ %% Match value to avoid infinite loop, since {empty, B} = queue:out(B).
+ {_, {{value, _}, B1}} ->
+ find_common(A, B1, Common);
+ %% Drop value from A. Empty A should be matched by the second clause.
+ {{{value, _}, A1}, _} ->
+ find_common(A1, B, Common)
+ end.
+
+
+%% ---------------------------------------------------------------------------
+%% Members helpers
+%% ---------------------------------------------------------------------------
+
+with_member(Fun, Id, MembersState) ->
+ store_member(
+ Id, Fun(find_member_or_blank(Id, MembersState)), MembersState).
+
+with_member_acc(Fun, Id, {MembersState, Acc}) ->
+ {MemberState, Acc1} = Fun(find_member_or_blank(Id, MembersState), Acc),
+ {store_member(Id, MemberState, MembersState), Acc1}.
+
+find_member_or_blank(Id, MembersState) ->
+ case maps:find(Id, MembersState) of
+ {ok, Result} -> Result;
+ error -> blank_member()
+ end.
+
+erase_member(Id, MembersState) -> maps:remove(Id, MembersState).
+
+blank_member() ->
+ #member { pending_ack = queue:new(), last_pub = -1, last_ack = -1 }.
+
+blank_member_state() -> maps:new().
+
+store_member(Id, MemberState, MembersState) ->
+ maps:put(Id, MemberState, MembersState).
+
+prepare_members_state(MembersState) -> maps:to_list(MembersState).
+
+build_members_state(MembersStateList) -> maps:from_list(MembersStateList).
+
+make_member(GroupName) ->
+ {case dirty_read_group(GroupName) of
+ #gm_group { version = Version } -> Version;
+ {error, not_found} -> ?VERSION_START
+ end, self()}.
+
+remove_erased_members(MembersState, View) ->
+ lists:foldl(fun (Id, MembersState1) ->
+ store_member(Id, find_member_or_blank(Id, MembersState),
+ MembersState1)
+ end, blank_member_state(), all_known_members(View)).
+
+get_version({Version, _Pid}) -> Version.
+
+get_pid({_Version, Pid}) -> Pid.
+
+get_pids(Ids) -> [Pid || {_Version, Pid} <- Ids].
+
+%% ---------------------------------------------------------------------------
+%% Activity assembly
+%% ---------------------------------------------------------------------------
+
+activity_nil() -> queue:new().
+
+activity_cons( _Id, [], [], Tail) -> Tail;
+activity_cons(Sender, Pubs, Acks, Tail) -> queue:in({Sender, Pubs, Acks}, Tail).
+
+activity_finalise(Activity) -> queue:to_list(Activity).
+
+maybe_send_activity([], _State) ->
+ ok;
+maybe_send_activity(Activity, #state { self = Self,
+ right = {Right, _MRefR},
+ view = View }) ->
+ send_right(Right, View, {activity, Self, Activity}).
+
+send_right(Right, View, Msg) ->
+ ok = neighbour_cast(Right, {?TAG, view_version(View), Msg}).
+
+calculate_activity(MembersState, Confirms, Activity, Self, View) ->
+ lists:foldl(
+ fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) ->
+ with_member_acc(
+ fun (Member = #member { pending_ack = PA,
+ last_pub = LP,
+ last_ack = LA },
+ {Confirms2, Activity2}) ->
+ case is_member_alias(Id, Self, View) of
+ true ->
+ {ToAck, PA1} =
+ find_common(queue_from_pubs(Pubs), PA,
+ queue:new()),
+ LA1 = last_ack(Acks, LA),
+ AckNums = acks_from_queue(ToAck),
+ Confirms3 = maybe_confirm(
+ Self, Id, Confirms2, AckNums),
+ {Member #member { pending_ack = PA1,
+ last_ack = LA1 },
+ {Confirms3,
+ activity_cons(
+ Id, [], AckNums, Activity2)}};
+ false ->
+ PA1 = apply_acks(Acks, join_pubs(PA, Pubs)),
+ LA1 = last_ack(Acks, LA),
+ LP1 = last_pub(Pubs, LP),
+ {Member #member { pending_ack = PA1,
+ last_pub = LP1,
+ last_ack = LA1 },
+ {Confirms2,
+ activity_cons(Id, Pubs, Acks, Activity2)}}
+ end
+ end, Id, MembersStateConfirmsActivity)
+ end, {MembersState, {Confirms, activity_nil()}}, Activity).
+
+callback(Args, Module, Activity) ->
+ Result =
+ lists:foldl(
+ fun ({Id, Pubs, _Acks}, {Args1, Module1, ok}) ->
+ lists:foldl(fun ({_PubNum, Pub}, Acc = {Args2, Module2, ok}) ->
+ case Module2:handle_msg(
+ Args2, get_pid(Id), Pub) of
+ ok ->
+ Acc;
+ {become, Module3, Args3} ->
+ {Args3, Module3, ok};
+ {stop, _Reason} = Error ->
+ Error
+ end;
+ (_, Error = {stop, _Reason}) ->
+ Error
+ end, {Args1, Module1, ok}, Pubs);
+ (_, Error = {stop, _Reason}) ->
+ Error
+ end, {Args, Module, ok}, Activity),
+ case Result of
+ {Args, Module, ok} -> ok;
+ {Args1, Module1, ok} -> {become, Module1, Args1};
+ {stop, _Reason} = Error -> Error
+ end.
+
+change_view(View, State = #state { view = View0,
+ module = Module,
+ callback_args = Args }) ->
+ OldMembers = all_known_members(View0),
+ NewMembers = all_known_members(View),
+ Births = NewMembers -- OldMembers,
+ Deaths = OldMembers -- NewMembers,
+ Result = case {Births, Deaths} of
+ {[], []} -> ok;
+ _ -> Module:members_changed(
+ Args, get_pids(Births), get_pids(Deaths))
+ end,
+ {Result, check_neighbours(State #state { view = View })}.
+
+handle_callback_result({Result, State}) ->
+ if_callback_success(
+ Result, fun no_reply_true/3, fun no_reply_false/3, undefined, State);
+handle_callback_result({Result, Reply, State}) ->
+ if_callback_success(
+ Result, fun reply_true/3, fun reply_false/3, Reply, State).
+
+no_reply_true (_Result, _Undefined, State) -> noreply(State).
+no_reply_false({stop, Reason}, _Undefined, State) -> {stop, Reason, State}.
+
+reply_true (_Result, Reply, State) -> reply(Reply, State).
+reply_false({stop, Reason}, Reply, State) -> {stop, Reason, Reply, State}.
+
+handle_msg_true (_Result, Msg, State) -> handle_msg(Msg, State).
+handle_msg_false(Result, _Msg, State) -> {Result, State}.
+
+activity_true(_Result, Activity, State = #state { module = Module,
+ callback_args = Args }) ->
+ {callback(Args, Module, Activity), State}.
+activity_false(Result, _Activity, State) ->
+ {Result, State}.
+
+if_callback_success(Result, True, False, Arg, State) ->
+ {NewResult, NewState} = maybe_stop(Result, State),
+ if_callback_success1(NewResult, True, False, Arg, NewState).
+
+if_callback_success1(ok, True, _False, Arg, State) ->
+ True(ok, Arg, State);
+if_callback_success1(
+ {become, Module, Args} = Result, True, _False, Arg, State) ->
+ True(Result, Arg, State #state { module = Module,
+ callback_args = Args });
+if_callback_success1({stop, _Reason} = Result, _True, False, Arg, State) ->
+ False(Result, Arg, State).
+
+maybe_stop({stop, Reason}, #state{ shutting_down = false } = State) ->
+ ShuttingDown = {true, Reason},
+ case has_pending_messages(State) of
+ true -> {ok, State #state{ shutting_down = ShuttingDown }};
+ false -> {{stop, Reason}, State #state{ shutting_down = ShuttingDown }}
+ end;
+maybe_stop(Result, #state{ shutting_down = false } = State) ->
+ {Result, State};
+maybe_stop(Result, #state{ shutting_down = {true, Reason} } = State) ->
+ case has_pending_messages(State) of
+ true -> {Result, State};
+ false -> {{stop, Reason}, State}
+ end.
+
+has_pending_messages(#state{ broadcast_buffer = Buffer })
+ when Buffer =/= [] ->
+ true;
+has_pending_messages(#state{ members_state = MembersState }) ->
+ MembersWithPubAckMismatches = maps:filter(fun(_Id, #member{last_pub = LP, last_ack = LA}) ->
+ LP =/= LA
+ end, MembersState),
+ 0 =/= maps:size(MembersWithPubAckMismatches).
+
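+%% Confirms is a queue of {PubNum, From} pairs in publication order, one
+%% per outstanding confirmed_broadcast/2 call. Once the acknowledgement
+%% for PubNum has been seen back at the publisher, we reply ok to From.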
+maybe_confirm(_Self, _Id, Confirms, []) ->
+ Confirms;
+maybe_confirm(Self, Self, Confirms, [PubNum | PubNums]) ->
+ case queue:out(Confirms) of
+ {empty, _Confirms} ->
+ Confirms;
+ {{value, {PubNum, From}}, Confirms1} ->
+ gen_server2:reply(From, ok),
+ maybe_confirm(Self, Self, Confirms1, PubNums);
+ {{value, {PubNum1, _From}}, _Confirms} when PubNum1 > PubNum ->
+ maybe_confirm(Self, Self, Confirms, PubNums)
+ end;
+maybe_confirm(_Self, _Id, Confirms, _PubNums) ->
+ Confirms.
+
+purge_confirms(Confirms) ->
+ _ = [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)],
+ queue:new().
+
+
+%% ---------------------------------------------------------------------------
+%% Msg transformation
+%% ---------------------------------------------------------------------------
+
+acks_from_queue(Q) -> [PubNum || {PubNum, _Msg} <- queue:to_list(Q)].
+
+pubs_from_queue(Q) -> queue:to_list(Q).
+
+queue_from_pubs(Pubs) -> queue:from_list(Pubs).
+
+apply_acks( [], Pubs) -> Pubs;
+apply_acks(List, Pubs) -> {_, Pubs1} = queue:split(length(List), Pubs),
+ Pubs1.
+
+join_pubs(Q, []) -> Q;
+join_pubs(Q, Pubs) -> queue:join(Q, queue_from_pubs(Pubs)).
+
+last_ack( [], LA) -> LA;
+last_ack(List, LA) -> LA1 = lists:last(List),
+ true = LA1 > LA, %% ASSERTION
+ LA1.
+
+last_pub( [], LP) -> LP;
+last_pub(List, LP) -> {PubNum, _Msg} = lists:last(List),
+ true = PubNum > LP, %% ASSERTION
+ PubNum.
+
+%% ---------------------------------------------------------------------------
+
+%% Uninstrumented versions
+
+call(Pid, Msg, Timeout) -> gen_server2:call(Pid, Msg, Timeout).
+cast(Pid, Msg) -> gen_server2:cast(Pid, Msg).
+monitor(Pid) -> erlang:monitor(process, Pid).
+demonitor(MRef) -> erlang:demonitor(MRef).
+
+check_membership(Self, #gm_group{members = M} = Group) ->
+ case lists:member(Self, M) of
+ true ->
+ Group;
+ false ->
+ throw(lost_membership)
+ end;
+check_membership(_Self, {error, not_found}) ->
+ throw(lost_membership).
+
+check_membership(GroupName) ->
+ case dirty_read_group(GroupName) of
+ #gm_group{members = M} ->
+ case lists:keymember(self(), 2, M) of
+ true ->
+ ok;
+ false ->
+ throw(lost_membership)
+ end;
+ {error, not_found} ->
+ throw(lost_membership)
+ end.
+
+check_group({error, not_found}) ->
+ throw(lost_membership);
+check_group(Any) ->
+ Any.
diff --git a/deps/rabbit/src/internal_user.erl b/deps/rabbit/src/internal_user.erl
new file mode 100644
index 0000000000..b2bdcb6785
--- /dev/null
+++ b/deps/rabbit/src/internal_user.erl
@@ -0,0 +1,216 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(internal_user).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([
+ new/0,
+ new/1,
+ record_version_to_use/0,
+ fields/0,
+ fields/1,
+ upgrade/1,
+ upgrade_to/2,
+ pattern_match_all/0,
+ get_username/1,
+ get_password_hash/1,
+ get_tags/1,
+ get_hashing_algorithm/1,
+ get_limits/1,
+ create_user/3,
+ set_password_hash/3,
+ set_tags/2,
+ update_limits/3,
+ clear_limits/1
+]).
+
+-define(record_version, internal_user_v2).
+
+-type(username() :: binary()).
+
+-type(password_hash() :: binary()).
+
+-type internal_user() :: internal_user_v1:internal_user_v1() | internal_user_v2().
+
+-record(internal_user, {
+ username :: username() | '_',
+ password_hash :: password_hash() | '_',
+ tags :: [atom()] | '_',
+ %% password hashing implementation module,
+ %% typically rabbit_password_hashing_* but can
+ %% come from a plugin
+ hashing_algorithm :: atom() | '_',
+ limits = #{} :: map() | '_'}).
+
+-type(internal_user_v2() ::
+ #internal_user{username :: username() | '_',
+ password_hash :: password_hash() | '_',
+ tags :: [atom()] | '_',
+ hashing_algorithm :: atom() | '_',
+ limits :: map()}).
+
+-type internal_user_pattern() :: internal_user_v1:internal_user_v1_pattern() |
+ internal_user_v2_pattern().
+
+-type internal_user_v2_pattern() :: #internal_user{
+ username :: username() | '_',
+ password_hash :: '_',
+ tags :: '_',
+ hashing_algorithm :: '_',
+ limits :: '_'
+ }.
+
+-export_type([username/0,
+ password_hash/0,
+ internal_user/0,
+ internal_user_v2/0,
+ internal_user_pattern/0,
+ internal_user_v2_pattern/0]).
+
+-spec new() -> internal_user().
+new() ->
+ case record_version_to_use() of
+ ?record_version ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ tags = []
+ };
+ _ ->
+ internal_user_v1:new()
+ end.
+
+-spec new(tuple()) -> internal_user().
+new({hashing_algorithm, HashingAlgorithm}) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ tags = [],
+ hashing_algorithm = HashingAlgorithm
+ };
+ _ ->
+ internal_user_v1:new({hashing_algorithm, HashingAlgorithm})
+ end;
+new({tags, Tags}) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ tags = Tags
+ };
+ _ ->
+ internal_user_v1:new({tags, Tags})
+ end.
+
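+%% The user_limits feature flag determines which record version is in
+%% use: when the flag is enabled we use internal_user_v2 (which carries
+%% the limits field); otherwise we fall back to internal_user_v1 and
+%% delegate to that module.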
+-spec record_version_to_use() -> internal_user_v1 | internal_user_v2.
+record_version_to_use() ->
+ case rabbit_feature_flags:is_enabled(user_limits) of
+ true -> ?record_version;
+ false -> internal_user_v1:record_version_to_use()
+ end.
+
+-spec fields() -> list().
+fields() ->
+ case record_version_to_use() of
+ ?record_version -> fields(?record_version);
+ _ -> internal_user_v1:fields()
+ end.
+
+-spec fields(atom()) -> list().
+fields(?record_version) -> record_info(fields, internal_user);
+fields(Version) -> internal_user_v1:fields(Version).
+
+-spec upgrade(internal_user()) -> internal_user().
+upgrade(#internal_user{} = User) -> User;
+upgrade(OldUser) -> upgrade_to(record_version_to_use(), OldUser).
+
+-spec upgrade_to
+(internal_user_v2, internal_user()) -> internal_user_v2();
+(internal_user_v1, internal_user_v1:internal_user_v1()) -> internal_user_v1:internal_user_v1().
+
+upgrade_to(?record_version, #internal_user{} = User) ->
+ User;
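+%% A v1 record is upgraded to v2 by appending the new limits field (an
+%% empty map) to the record tuple; all fields shared with v1 keep their
+%% positions.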
+upgrade_to(?record_version, OldUser) ->
+ Fields = erlang:tuple_to_list(OldUser) ++ [#{}],
+ #internal_user{} = erlang:list_to_tuple(Fields);
+upgrade_to(Version, OldUser) ->
+ internal_user_v1:upgrade_to(Version, OldUser).
+
+-spec pattern_match_all() -> internal_user_pattern().
+pattern_match_all() ->
+ case record_version_to_use() of
+ ?record_version -> #internal_user{_ = '_'};
+ _ -> internal_user_v1:pattern_match_all()
+ end.
+
+-spec get_username(internal_user()) -> username().
+get_username(#internal_user{username = Value}) -> Value;
+get_username(User) -> internal_user_v1:get_username(User).
+
+-spec get_password_hash(internal_user()) -> password_hash().
+get_password_hash(#internal_user{password_hash = Value}) -> Value;
+get_password_hash(User) -> internal_user_v1:get_password_hash(User).
+
+-spec get_tags(internal_user()) -> [atom()].
+get_tags(#internal_user{tags = Value}) -> Value;
+get_tags(User) -> internal_user_v1:get_tags(User).
+
+-spec get_hashing_algorithm(internal_user()) -> atom().
+get_hashing_algorithm(#internal_user{hashing_algorithm = Value}) -> Value;
+get_hashing_algorithm(User) -> internal_user_v1:get_hashing_algorithm(User).
+
+-spec get_limits(internal_user()) -> map().
+get_limits(#internal_user{limits = Value}) -> Value;
+get_limits(User) -> internal_user_v1:get_limits(User).
+
+-spec create_user(username(), password_hash(), atom()) -> internal_user().
+create_user(Username, PasswordHash, HashingMod) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #internal_user{username = Username,
+ password_hash = PasswordHash,
+ tags = [],
+ hashing_algorithm = HashingMod,
+ limits = #{}
+ };
+ _ ->
+ internal_user_v1:create_user(Username, PasswordHash, HashingMod)
+ end.
+
+-spec set_password_hash(internal_user(), password_hash(), atom()) -> internal_user().
+set_password_hash(#internal_user{} = User, PasswordHash, HashingAlgorithm) ->
+ User#internal_user{password_hash = PasswordHash,
+ hashing_algorithm = HashingAlgorithm};
+set_password_hash(User, PasswordHash, HashingAlgorithm) ->
+ internal_user_v1:set_password_hash(User, PasswordHash, HashingAlgorithm).
+
+-spec set_tags(internal_user(), [atom()]) -> internal_user().
+set_tags(#internal_user{} = User, Tags) ->
+ User#internal_user{tags = Tags};
+set_tags(User, Tags) ->
+ internal_user_v1:set_tags(User, Tags).
+
+-spec update_limits
+(add, internal_user(), map()) -> internal_user();
+(remove, internal_user(), term()) -> internal_user().
+update_limits(add, #internal_user{limits = Limits} = User, Term) ->
+ User#internal_user{limits = maps:merge(Limits, Term)};
+update_limits(remove, #internal_user{limits = Limits} = User, LimitType) ->
+ User#internal_user{limits = maps:remove(LimitType, Limits)};
+update_limits(Action, User, Term) ->
+ internal_user_v1:update_limits(Action, User, Term).
+
+-spec clear_limits(internal_user()) -> internal_user().
+clear_limits(#internal_user{} = User) ->
+ User#internal_user{limits = #{}};
+clear_limits(User) ->
+ internal_user_v1:clear_limits(User).
diff --git a/deps/rabbit/src/internal_user_v1.erl b/deps/rabbit/src/internal_user_v1.erl
new file mode 100644
index 0000000000..edb956436f
--- /dev/null
+++ b/deps/rabbit/src/internal_user_v1.erl
@@ -0,0 +1,151 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(internal_user_v1).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([
+ new/0,
+ new/1,
+ record_version_to_use/0,
+ fields/0,
+ fields/1,
+ upgrade/1,
+ upgrade_to/2,
+ pattern_match_all/0,
+ get_username/1,
+ get_password_hash/1,
+ get_tags/1,
+ get_hashing_algorithm/1,
+ get_limits/1,
+ create_user/3,
+ set_password_hash/3,
+ set_tags/2,
+ update_limits/3,
+ clear_limits/1
+]).
+
+-define(record_version, ?MODULE).
+
+-record(internal_user, {
+ username :: internal_user:username() | '_',
+ password_hash :: internal_user:password_hash() | '_',
+ tags :: [atom()] | '_',
+ %% password hashing implementation module,
+ %% typically rabbit_password_hashing_* but can
+ %% come from a plugin
+ hashing_algorithm :: atom() | '_'}).
+
+-type internal_user() :: internal_user_v1().
+
+-type(internal_user_v1() ::
+ #internal_user{username :: internal_user:username(),
+ password_hash :: internal_user:password_hash(),
+ tags :: [atom()],
+ hashing_algorithm :: atom()}).
+
+-type internal_user_pattern() :: internal_user_v1_pattern().
+
+-type internal_user_v1_pattern() :: #internal_user{
+ username :: internal_user:username() | '_',
+ password_hash :: '_',
+ tags :: '_',
+ hashing_algorithm :: '_'
+ }.
+
+-export_type([internal_user/0,
+ internal_user_v1/0,
+ internal_user_pattern/0,
+ internal_user_v1_pattern/0]).
+
+-spec record_version_to_use() -> internal_user_v1.
+record_version_to_use() ->
+ ?record_version.
+
+-spec new() -> internal_user().
+new() ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ tags = []
+ }.
+
+-spec new(tuple()) -> internal_user().
+new({hashing_algorithm, HashingAlgorithm}) ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ hashing_algorithm = HashingAlgorithm,
+ tags = []
+ };
+new({tags, Tags}) ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ tags = Tags
+ }.
+
+-spec fields() -> list().
+fields() -> fields(?record_version).
+
+-spec fields(atom()) -> list().
+fields(?record_version) -> record_info(fields, internal_user).
+
+-spec upgrade(internal_user()) -> internal_user().
+upgrade(#internal_user{} = User) -> User.
+
+-spec upgrade_to(internal_user_v1, internal_user()) -> internal_user().
+upgrade_to(?record_version, #internal_user{} = User) ->
+ User.
+
+-spec pattern_match_all() -> internal_user_pattern().
+pattern_match_all() -> #internal_user{_ = '_'}.
+
+-spec get_username(internal_user()) -> internal_user:username().
+get_username(#internal_user{username = Value}) -> Value.
+
+-spec get_password_hash(internal_user()) -> internal_user:password_hash().
+get_password_hash(#internal_user{password_hash = Value}) -> Value.
+
+-spec get_tags(internal_user()) -> [atom()].
+get_tags(#internal_user{tags = Value}) -> Value.
+
+-spec get_hashing_algorithm(internal_user()) -> atom().
+get_hashing_algorithm(#internal_user{hashing_algorithm = Value}) -> Value.
+
+-spec get_limits(internal_user()) -> map().
+get_limits(_User) -> #{}.
+
+-spec create_user(internal_user:username(), internal_user:password_hash(),
+ atom()) -> internal_user().
+create_user(Username, PasswordHash, HashingMod) ->
+ #internal_user{username = Username,
+ password_hash = PasswordHash,
+ tags = [],
+ hashing_algorithm = HashingMod
+ }.
+
+-spec set_password_hash(internal_user:internal_user(),
+ internal_user:password_hash(), atom()) -> internal_user().
+set_password_hash(#internal_user{} = User, PasswordHash, HashingAlgorithm) ->
+ User#internal_user{password_hash = PasswordHash,
+ hashing_algorithm = HashingAlgorithm}.
+
+-spec set_tags(internal_user(), [atom()]) -> internal_user().
+set_tags(#internal_user{} = User, Tags) ->
+ User#internal_user{tags = Tags}.
+
+-spec update_limits
+(add, internal_user(), map()) -> internal_user();
+(remove, internal_user(), term()) -> internal_user().
+update_limits(_, User, _) ->
+ User.
+
+-spec clear_limits(internal_user()) -> internal_user().
+clear_limits(User) ->
+ User.
diff --git a/deps/rabbit/src/lager_exchange_backend.erl b/deps/rabbit/src/lager_exchange_backend.erl
new file mode 100644
index 0000000000..cd96f2230e
--- /dev/null
+++ b/deps/rabbit/src/lager_exchange_backend.erl
@@ -0,0 +1,233 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @doc RabbitMQ backend for lager.
+%% Configuration is a proplist with the following keys:
+%% <ul>
+%% <li>`level' - log level to use</li>
+%% <li>`formatter' - the module to use when formatting log messages. Defaults to
+%% `lager_default_formatter'</li>
+%% <li>`formatter_config' - the format configuration string. Defaults to
+%% `time [ severity ] message'</li>
+%% </ul>
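+%%
+%% As a minimal configuration sketch (illustrative values only), such a
+%% proplist might appear in a lager `handlers' list:
+%%
+%%   {lager, [
+%%     {handlers, [
+%%       {lager_exchange_backend, [{level, info},
+%%                                 {formatter_config, [time, " [", severity, "] ", message]}]}
+%%     ]}
+%%   ]}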
+
+-module(lager_exchange_backend).
+
+-behaviour(gen_event).
+
+-export([init/1, terminate/2, code_change/3,
+ handle_call/2, handle_event/2, handle_info/2]).
+
+-export([maybe_init_exchange/0]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-include_lib("lager/include/lager.hrl").
+
+-record(state, {level :: {'mask', integer()},
+ formatter :: atom(),
+ format_config :: any(),
+ init_exchange_ts = undefined :: integer() | undefined,
+ exchange = undefined :: #resource{} | undefined}).
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-compile([{parse_transform, lager_transform}]).
+-endif.
+
+-define(INIT_EXCHANGE_INTERVAL_SECS, 5).
+-define(TERSE_FORMAT, [time, " [", severity, "] ", message]).
+-define(DEFAULT_FORMAT_CONFIG, ?TERSE_FORMAT).
+-define(FORMAT_CONFIG_OFF, []).
+
+-ifdef(TEST).
+-define(DEPRECATED(_Msg), ok).
+-else.
+-define(DEPRECATED(Msg),
+ io:format(user, "WARNING: This is a deprecated lager_exchange_backend configuration. Please use \"~w\" instead.~n", [Msg])).
+-endif.
+
+-define(LOG_EXCH_NAME, <<"amq.rabbitmq.log">>).
+
+init([Level]) when is_atom(Level) ->
+ ?DEPRECATED([{level, Level}]),
+ init([{level, Level}]);
+init([Level, true]) when is_atom(Level) -> % for backwards compatibility
+ ?DEPRECATED([{level, Level}, {formatter_config, [{eol, "\\r\\n\\"}]}]),
+ init([{level, Level}, {formatter_config, ?FORMAT_CONFIG_OFF}]);
+init([Level, false]) when is_atom(Level) -> % for backwards compatibility
+ ?DEPRECATED([{level, Level}]),
+ init([{level, Level}]);
+
+init(Options) when is_list(Options) ->
+ true = validate_options(Options),
+ Level = get_option(level, Options, undefined),
+ try lager_util:config_to_mask(Level) of
+ L ->
+ DefaultOptions = [{formatter, lager_default_formatter},
+ {formatter_config, ?DEFAULT_FORMAT_CONFIG}],
+ [Formatter, Config] = [get_option(K, Options, Default) || {K, Default} <- DefaultOptions],
+ State0 = #state{level=L,
+ formatter=Formatter,
+ format_config=Config},
+ % NB: this will probably always fail since the / vhost isn't available
+ State1 = maybe_init_exchange(State0),
+ {ok, State1}
+ catch
+ _:_ ->
+ {error, {fatal, bad_log_level}}
+ end;
+init(Level) when is_atom(Level) ->
+ ?DEPRECATED([{level, Level}]),
+ init([{level, Level}]);
+init(Other) ->
+ {error, {fatal, {bad_lager_exchange_backend_config, Other}}}.
+
+% rabbitmq/rabbitmq-server#1973
+% This is called immediately after the / vhost is created
+% or recovered
+maybe_init_exchange() ->
+ case lists:member(?MODULE, gen_event:which_handlers(lager_event)) of
+ true ->
+ _ = init_exchange(true),
+ ok;
+ _ ->
+ ok
+ end.
+
+validate_options([]) -> true;
+validate_options([{level, L}|T]) when is_atom(L) ->
+ case lists:member(L, ?LEVELS) of
+ false ->
+ throw({error, {fatal, {bad_level, L}}});
+ true ->
+ validate_options(T)
+ end;
+validate_options([{formatter, M}|T]) when is_atom(M) ->
+ validate_options(T);
+validate_options([{formatter_config, C}|T]) when is_list(C) ->
+ validate_options(T);
+validate_options([H|_]) ->
+ throw({error, {fatal, {bad_lager_exchange_backend_config, H}}}).
+
+get_option(K, Options, Default) ->
+ case lists:keyfind(K, 1, Options) of
+ {K, V} -> V;
+ false -> Default
+ end.
+
+handle_call(get_loglevel, #state{level=Level} = State) ->
+ {ok, Level, State};
+handle_call({set_loglevel, Level}, State) ->
+ try lager_util:config_to_mask(Level) of
+ Levels ->
+ {ok, ok, State#state{level=Levels}}
+ catch
+ _:_ ->
+ {ok, {error, bad_log_level}, State}
+ end;
+handle_call(_Request, State) ->
+ {ok, ok, State}.
+
+handle_event({log, _Message} = Event, State0) ->
+ State1 = maybe_init_exchange(State0),
+ handle_log_event(Event, State1);
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% @private
+handle_log_event({log, _Message}, #state{exchange=undefined} = State) ->
+    % NB: tried to declare the exchange but it is still undefined,
+ % so not logging this message. Note: we can't log this dropped
+ % message because it will start an infinite loop
+ {ok, State};
+handle_log_event({log, Message},
+ #state{level=L, exchange=LogExch,
+ formatter=Formatter, format_config=FormatConfig} = State) ->
+ case lager_util:is_loggable(Message, L, ?MODULE) of
+ true ->
+ %% 0-9-1 says the timestamp is a "64 bit POSIX timestamp". That's
+ %% second resolution, not millisecond.
+ RoutingKey = rabbit_data_coercion:to_binary(lager_msg:severity(Message)),
+ Timestamp = os:system_time(seconds),
+ Node = rabbit_data_coercion:to_binary(node()),
+ Headers = [{<<"node">>, longstr, Node}],
+ AmqpMsg = #'P_basic'{content_type = <<"text/plain">>,
+ timestamp = Timestamp,
+ headers = Headers},
+ Body = rabbit_data_coercion:to_binary(Formatter:format(Message, FormatConfig)),
+ case rabbit_basic:publish(LogExch, RoutingKey, AmqpMsg, Body) of
+ ok -> ok;
+ {error, not_found} -> ok
+ end,
+ {ok, State};
+ false ->
+ {ok, State}
+ end.
+
+%% @private
+maybe_init_exchange(#state{exchange=undefined, init_exchange_ts=undefined} = State) ->
+ Now = erlang:monotonic_time(second),
+ handle_init_exchange(init_exchange(true), Now, State);
+maybe_init_exchange(#state{exchange=undefined, init_exchange_ts=Timestamp} = State) ->
+ Now = erlang:monotonic_time(second),
+ % NB: since we may try to declare the exchange on every log message, this ensures
+ % that we only try once every 5 seconds
+ HasEnoughTimeElapsed = Now - Timestamp > ?INIT_EXCHANGE_INTERVAL_SECS,
+ Result = init_exchange(HasEnoughTimeElapsed),
+ handle_init_exchange(Result, Now, State);
+maybe_init_exchange(State) ->
+ State.
+
+%% @private
+init_exchange(true) ->
+ {ok, DefaultVHost} = application:get_env(rabbit, default_vhost),
+ Exchange = rabbit_misc:r(DefaultVHost, exchange, ?LOG_EXCH_NAME),
+ try
+ %% durable
+ #exchange{} = rabbit_exchange:declare(Exchange, topic, true, false, true, [], ?INTERNAL_USER),
+ rabbit_log:info("Declared exchange '~s' in vhost '~s'", [?LOG_EXCH_NAME, DefaultVHost]),
+ {ok, Exchange}
+ catch
+ ErrType:Err ->
+ rabbit_log:error("Could not declare exchange '~s' in vhost '~s', reason: ~p:~p",
+ [?LOG_EXCH_NAME, DefaultVHost, ErrType, Err]),
+ {ok, undefined}
+ end;
+init_exchange(_) ->
+ {ok, undefined}.
+
+%% @private
+handle_init_exchange({ok, undefined}, Now, State) ->
+ State#state{init_exchange_ts=Now};
+handle_init_exchange({ok, Exchange}, Now, State) ->
+ State#state{exchange=Exchange, init_exchange_ts=Now}.
+
+-ifdef(TEST).
+console_config_validation_test_() ->
+ Good = [{level, info}],
+ Bad1 = [{level, foo}],
+ Bad2 = [{larval, info}],
+ AllGood = [{level, info}, {formatter, my_formatter},
+ {formatter_config, ["blort", "garbage"]}],
+ [
+ ?_assertEqual(true, validate_options(Good)),
+ ?_assertThrow({error, {fatal, {bad_level, foo}}}, validate_options(Bad1)),
+ ?_assertThrow({error, {fatal, {bad_lager_exchange_backend_config, {larval, info}}}}, validate_options(Bad2)),
+ ?_assertEqual(true, validate_options(AllGood))
+ ].
+-endif.
diff --git a/deps/rabbit/src/lqueue.erl b/deps/rabbit/src/lqueue.erl
new file mode 100644
index 0000000000..1e267210d9
--- /dev/null
+++ b/deps/rabbit/src/lqueue.erl
@@ -0,0 +1,102 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(lqueue).
+
+%% lqueue implements a subset of Erlang's queue module. lqueues
+%% maintain their own length, so lqueue:len/1
+%% is an O(1) operation, in contrast with queue:len/1 which is O(n).
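+%%
+%% A minimal usage sketch (illustrative only):
+%%
+%%   Q0 = lqueue:new(),
+%%   Q1 = lqueue:in(a, Q0),
+%%   Q2 = lqueue:in(b, Q1),
+%%   2 = lqueue:len(Q2),                 %% O(1): the length is tracked alongside the queue
+%%   {{value, a}, _Q3} = lqueue:out(Q2).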
+
+-export([new/0, is_empty/1, len/1, in/2, in_r/2, out/1, out_r/1, join/2,
+ foldl/3, foldr/3, from_list/1, drop/1, to_list/1, peek/1, peek_r/1]).
+
+-define(QUEUE, queue).
+
+-export_type([
+ ?MODULE/0,
+ ?MODULE/1
+ ]).
+
+-opaque ?MODULE() :: ?MODULE(_).
+-opaque ?MODULE(T) :: {non_neg_integer(), queue:queue(T)}.
+-type value() :: any().
+-type result(T) :: 'empty' | {'value', T}.
+
+-spec new() -> ?MODULE(_).
+
+new() -> {0, ?QUEUE:new()}.
+
+-spec drop(?MODULE(T)) -> ?MODULE(T).
+
+drop({L, Q}) -> {L - 1, ?QUEUE:drop(Q)}.
+
+-spec is_empty(?MODULE(_)) -> boolean().
+
+is_empty({0, _Q}) -> true;
+is_empty(_) -> false.
+
+-spec in(T, ?MODULE(T)) -> ?MODULE(T).
+
+in(V, {L, Q}) -> {L+1, ?QUEUE:in(V, Q)}.
+
+-spec in_r(value(), ?MODULE(T)) -> ?MODULE(T).
+
+in_r(V, {L, Q}) -> {L+1, ?QUEUE:in_r(V, Q)}.
+
+-spec out(?MODULE(T)) -> {result(T), ?MODULE(T)}.
+
+out({0, _Q} = Q) -> {empty, Q};
+out({L, Q}) -> {Result, Q1} = ?QUEUE:out(Q),
+ {Result, {L-1, Q1}}.
+
+-spec out_r(?MODULE(T)) -> {result(T), ?MODULE(T)}.
+
+out_r({0, _Q} = Q) -> {empty, Q};
+out_r({L, Q}) -> {Result, Q1} = ?QUEUE:out_r(Q),
+ {Result, {L-1, Q1}}.
+
+-spec join(?MODULE(A), ?MODULE(B)) -> ?MODULE(A | B).
+
+join({L1, Q1}, {L2, Q2}) -> {L1 + L2, ?QUEUE:join(Q1, Q2)}.
+
+-spec to_list(?MODULE(T)) -> [T].
+
+to_list({_L, Q}) -> ?QUEUE:to_list(Q).
+
+-spec from_list([T]) -> ?MODULE(T).
+
+from_list(L) -> {length(L), ?QUEUE:from_list(L)}.
+
+-spec foldl(fun ((T, B) -> B), B, ?MODULE(T)) -> B.
+
+foldl(Fun, Init, Q) ->
+ case out(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> foldl(Fun, Fun(V, Init), Q1)
+ end.
+
+-spec foldr(fun ((T, B) -> B), B, ?MODULE(T)) -> B.
+
+foldr(Fun, Init, Q) ->
+ case out_r(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> foldr(Fun, Fun(V, Init), Q1)
+ end.
+
+-spec len(?MODULE(_)) -> non_neg_integer().
+
+len({L, _}) -> L.
+
+-spec peek(?MODULE(T)) -> result(T).
+
+peek({ 0, _Q}) -> empty;
+peek({_L, Q}) -> ?QUEUE:peek(Q).
+
+-spec peek_r(?MODULE(T)) -> result(T).
+
+peek_r({ 0, _Q}) -> empty;
+peek_r({_L, Q}) -> ?QUEUE:peek_r(Q).
diff --git a/deps/rabbit/src/mirrored_supervisor_sups.erl b/deps/rabbit/src/mirrored_supervisor_sups.erl
new file mode 100644
index 0000000000..b29d4d48e6
--- /dev/null
+++ b/deps/rabbit/src/mirrored_supervisor_sups.erl
@@ -0,0 +1,34 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(mirrored_supervisor_sups).
+
+-define(SUPERVISOR, supervisor2).
+-define(GS_MODULE, mirrored_supervisor).
+
+-behaviour(?SUPERVISOR).
+
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+init({overall, _Group, _TxFun, ignore}) -> ignore;
+init({overall, Group, TxFun, {ok, {Restart, ChildSpecs}}}) ->
+ %% Important: Delegate MUST start before Mirroring so that when we
+ %% shut down from above it shuts down last, so Mirroring does not
+ %% see it die.
+ %%
+ %% See comment in handle_info('DOWN', ...) in mirrored_supervisor
+ {ok, {{one_for_all, 0, 1},
+ [{delegate, {?SUPERVISOR, start_link, [?MODULE, {delegate, Restart}]},
+ temporary, 16#ffffffff, supervisor, [?SUPERVISOR]},
+ {mirroring, {?GS_MODULE, start_internal, [Group, TxFun, ChildSpecs]},
+ permanent, 16#ffffffff, worker, [?MODULE]}]}};
+
+init({delegate, Restart}) ->
+ {ok, {Restart, []}}.
diff --git a/deps/rabbit/src/pg_local.erl b/deps/rabbit/src/pg_local.erl
new file mode 100644
index 0000000000..263e743d1f
--- /dev/null
+++ b/deps/rabbit/src/pg_local.erl
@@ -0,0 +1,249 @@
+%% This file is a copy of pg2.erl from the R13B-3 Erlang/OTP
+%% distribution, with the following modifications:
+%%
+%% 1) Process groups are node-local only.
+%%
+%% 2) Groups are created/deleted implicitly.
+%%
+%% 3) 'join' and 'leave' are asynchronous.
+%%
+%% 4) the type specs of the exported non-callback functions have been
+%% extracted into a separate, guarded section, and rewritten in
+%% old-style spec syntax, for better compatibility with older
+%% versions of Erlang/OTP. The remaining type specs have been
+%% removed.
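+%%
+%% Given the modifications above, a minimal usage sketch (illustrative
+%% only): because 'join' is asynchronous, call sync/0 before reading
+%% membership when a consistent view is needed.
+%%
+%%   ok = pg_local:join(my_group, self()),
+%%   ok = pg_local:sync(),
+%%   true = lists:member(self(), pg_local:get_members(my_group)).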
+
+%% All modifications are (C) 2010-2020 VMware, Inc. or its affiliates.
+
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at https://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(pg_local).
+
+-export([join/2, leave/2, get_members/1, in_group/2]).
+%% intended for testing only; not part of official API
+-export([sync/0, clear/0]).
+-export([start/0, start_link/0, init/1, handle_call/3, handle_cast/2,
+ handle_info/2, terminate/2]).
+
+%%----------------------------------------------------------------------------
+
+-type name() :: term().
+
+%%----------------------------------------------------------------------------
+
+-define(TABLE, pg_local_table).
+
+%%%
+%%% Exported functions
+%%%
+
+-spec start_link() -> {'ok', pid()} | {'error', any()}.
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+-spec start() -> {'ok', pid()} | {'error', any()}.
+
+start() ->
+ ensure_started().
+
+-spec join(name(), pid()) -> 'ok'.
+
+join(Name, Pid) when is_pid(Pid) ->
+ _ = ensure_started(),
+ gen_server:cast(?MODULE, {join, Name, Pid}).
+
+-spec leave(name(), pid()) -> 'ok'.
+
+leave(Name, Pid) when is_pid(Pid) ->
+ _ = ensure_started(),
+ gen_server:cast(?MODULE, {leave, Name, Pid}).
+
+-spec get_members(name()) -> [pid()].
+
+get_members(Name) ->
+ _ = ensure_started(),
+ group_members(Name).
+
+-spec in_group(name(), pid()) -> boolean().
+
+in_group(Name, Pid) ->
+ _ = ensure_started(),
+ %% The join message is a cast and thus can race, but we want to
+ %% keep it that way to be fast in the common case.
+ case member_present(Name, Pid) of
+ true -> true;
+ false -> sync(),
+ member_present(Name, Pid)
+ end.
+
+-spec sync() -> 'ok'.
+
+sync() ->
+ _ = ensure_started(),
+ gen_server:call(?MODULE, sync, infinity).
+
+clear() ->
+ _ = ensure_started(),
+ gen_server:call(?MODULE, clear, infinity).
+
+%%%
+%%% Callback functions from gen_server
+%%%
+
+-record(state, {}).
+
+init([]) ->
+ ?TABLE = ets:new(?TABLE, [ordered_set, protected, named_table]),
+ {ok, #state{}}.
+
+handle_call(sync, _From, S) ->
+ {reply, ok, S};
+
+handle_call(clear, _From, S) ->
+ ets:delete_all_objects(?TABLE),
+ {reply, ok, S};
+
+handle_call(Request, From, S) ->
+ error_logger:warning_msg("The pg_local server received an unexpected message:\n"
+ "handle_call(~p, ~p, _)\n",
+ [Request, From]),
+ {noreply, S}.
+
+handle_cast({join, Name, Pid}, S) ->
+ _ = join_group(Name, Pid),
+ {noreply, S};
+handle_cast({leave, Name, Pid}, S) ->
+ leave_group(Name, Pid),
+ {noreply, S};
+handle_cast(_, S) ->
+ {noreply, S}.
+
+handle_info({'DOWN', MonitorRef, process, Pid, _Info}, S) ->
+ member_died(MonitorRef, Pid),
+ {noreply, S};
+handle_info(_, S) ->
+ {noreply, S}.
+
+terminate(_Reason, _S) ->
+ true = ets:delete(?TABLE),
+ ok.
+
+%%%
+%%% Local functions
+%%%
+
+%%% One ETS table, pg_local_table, is used for bookkeeping. The type of the
+%%% table is ordered_set, and the fast matching of partially
+%%% instantiated keys is used extensively.
+%%%
+%%% {{ref, Pid}, MonitorRef, Counter}
+%%% {{ref, MonitorRef}, Pid}
+%%% Each process has one monitor. Counter is incremented when the
+%%% Pid joins some group.
+%%%    {{member, Name, Pid}, GroupCounter}
+%%%       Pid is a member of group Name; GroupCounter is incremented when the
+%%%       Pid joins the group Name.
+%%% {{pid, Pid, Name}}
+%%% Pid is a member of group Name.
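+%%%
+%%% For illustration (hypothetical values): after a pid P joins the group
+%%% foo twice, the table holds
+%%%    {{ref, P}, MRef, 2}
+%%%    {{ref, MRef}, P}
+%%%    {{member, foo, P}, 2}
+%%%    {{pid, P, foo}}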
+
+member_died(Ref, Pid) ->
+ case ets:lookup(?TABLE, {ref, Ref}) of
+ [{{ref, Ref}, Pid}] ->
+ leave_all_groups(Pid);
+ %% in case the key has already been removed
+ %% we can clean up using the value from the DOWN message
+ _ ->
+ leave_all_groups(Pid)
+ end,
+ ok.
+
+leave_all_groups(Pid) ->
+ Names = member_groups(Pid),
+ _ = [leave_group(Name, P) ||
+ Name <- Names,
+ P <- member_in_group(Pid, Name)].
+
+join_group(Name, Pid) ->
+ Ref_Pid = {ref, Pid},
+ try _ = ets:update_counter(?TABLE, Ref_Pid, {3, +1})
+ catch _:_ ->
+ Ref = erlang:monitor(process, Pid),
+ true = ets:insert(?TABLE, {Ref_Pid, Ref, 1}),
+ true = ets:insert(?TABLE, {{ref, Ref}, Pid})
+ end,
+ Member_Name_Pid = {member, Name, Pid},
+ try _ = ets:update_counter(?TABLE, Member_Name_Pid, {2, +1})
+ catch _:_ ->
+ true = ets:insert(?TABLE, {Member_Name_Pid, 1}),
+ true = ets:insert(?TABLE, {{pid, Pid, Name}})
+ end.
+
+leave_group(Name, Pid) ->
+ Member_Name_Pid = {member, Name, Pid},
+ try ets:update_counter(?TABLE, Member_Name_Pid, {2, -1}) of
+ N ->
+ if
+ N =:= 0 ->
+ true = ets:delete(?TABLE, {pid, Pid, Name}),
+ true = ets:delete(?TABLE, Member_Name_Pid);
+ true ->
+ ok
+ end,
+ Ref_Pid = {ref, Pid},
+ case ets:update_counter(?TABLE, Ref_Pid, {3, -1}) of
+ 0 ->
+ [{Ref_Pid,Ref,0}] = ets:lookup(?TABLE, Ref_Pid),
+ true = ets:delete(?TABLE, {ref, Ref}),
+ true = ets:delete(?TABLE, Ref_Pid),
+ true = erlang:demonitor(Ref, [flush]),
+ ok;
+ _ ->
+ ok
+ end
+ catch _:_ ->
+ ok
+ end.
+
+group_members(Name) ->
+ [P ||
+ [P, N] <- ets:match(?TABLE, {{member, Name, '$1'},'$2'}),
+ _ <- lists:seq(1, N)].
+
+member_in_group(Pid, Name) ->
+ [{{member, Name, Pid}, N}] = ets:lookup(?TABLE, {member, Name, Pid}),
+ lists:duplicate(N, Pid).
+
+member_present(Name, Pid) ->
+ case ets:lookup(?TABLE, {member, Name, Pid}) of
+ [_] -> true;
+ [] -> false
+ end.
+
+member_groups(Pid) ->
+ [Name || [Name] <- ets:match(?TABLE, {{pid, Pid, '$1'}})].
+
+ensure_started() ->
+ case whereis(?MODULE) of
+ undefined ->
+ C = {pg_local, {?MODULE, start_link, []}, permanent,
+ 16#ffffffff, worker, [?MODULE]},
+ supervisor:start_child(kernel_safe_sup, C);
+ PgLocalPid ->
+ {ok, PgLocalPid}
+ end.
diff --git a/deps/rabbit/src/rabbit.erl b/deps/rabbit/src/rabbit.erl
new file mode 100644
index 0000000000..9248c945dc
--- /dev/null
+++ b/deps/rabbit/src/rabbit.erl
@@ -0,0 +1,1511 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
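+%% For reference (not specific to this module), the OTP 21+ syntax alluded
+%% to above binds the stack trace directly in the catch clause:
+%%
+%%   try Expr
+%%   catch Class:Reason:Stacktrace -> handle(Class, Reason, Stacktrace)
+%%   end
+%%
+%% whereas older releases had to call erlang:get_stacktrace/0 inside the
+%% catch clause.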
+
+-behaviour(application).
+
+-export([start/0, boot/0, stop/0,
+ stop_and_halt/0, await_startup/0, await_startup/1, await_startup/3,
+ status/0, is_running/0, alarms/0,
+ is_running/1, environment/0, rotate_logs/0, force_event_refresh/1,
+ start_fhc/0]).
+
+-export([start/2, stop/1, prep_stop/1]).
+-export([start_apps/1, start_apps/2, stop_apps/1]).
+-export([product_info/0,
+ product_name/0,
+ product_version/0,
+ base_product_name/0,
+ base_product_version/0,
+ motd_file/0,
+ motd/0]).
+-export([log_locations/0, config_files/0]). %% for testing and mgmt-agent
+-export([is_booted/1, is_booted/0, is_booting/1, is_booting/0]).
+
+%%---------------------------------------------------------------------------
+%% Boot steps.
+-export([maybe_insert_default_data/0, boot_delegate/0, recover/0]).
+
+%% for tests
+-export([validate_msg_store_io_batch_size_and_credit_disc_bound/2]).
+
+-rabbit_boot_step({pre_boot, [{description, "rabbit boot start"}]}).
+
+-rabbit_boot_step({codec_correctness_check,
+ [{description, "codec correctness check"},
+ {mfa, {rabbit_binary_generator,
+ check_empty_frame_size,
+ []}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+%% rabbit_alarm currently starts memory and disk space monitors
+-rabbit_boot_step({rabbit_alarm,
+ [{description, "alarm handler"},
+ {mfa, {rabbit_alarm, start, []}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({feature_flags,
+ [{description, "feature flags registry and initial state"},
+ {mfa, {rabbit_feature_flags, init, []}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({database,
+ [{mfa, {rabbit_mnesia, init, []}},
+ {requires, file_handle_cache},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({database_sync,
+ [{description, "database sync"},
+ {mfa, {rabbit_sup, start_child, [mnesia_sync]}},
+ {requires, database},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({code_server_cache,
+ [{description, "code_server cache server"},
+ {mfa, {rabbit_sup, start_child, [code_server_cache]}},
+ {requires, rabbit_alarm},
+ {enables, file_handle_cache}]}).
+
+-rabbit_boot_step({file_handle_cache,
+ [{description, "file handle cache server"},
+ {mfa, {rabbit, start_fhc, []}},
+ %% FHC needs memory monitor to be running
+ {requires, code_server_cache},
+ {enables, worker_pool}]}).
+
+-rabbit_boot_step({worker_pool,
+ [{description, "default worker pool"},
+ {mfa, {rabbit_sup, start_supervisor_child,
+ [worker_pool_sup]}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({definition_import_worker_pool,
+ [{description, "dedicated worker pool for definition import"},
+ {mfa, {rabbit_definitions, boot, []}},
+ {requires, external_infrastructure}]}).
+
+-rabbit_boot_step({external_infrastructure,
+ [{description, "external infrastructure ready"}]}).
+
+-rabbit_boot_step({rabbit_registry,
+ [{description, "plugin registry"},
+ {mfa, {rabbit_sup, start_child,
+ [rabbit_registry]}},
+ {requires, external_infrastructure},
+ {enables, kernel_ready}]}).
+
+-rabbit_boot_step({rabbit_core_metrics,
+ [{description, "core metrics storage"},
+ {mfa, {rabbit_sup, start_child,
+ [rabbit_metrics]}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({rabbit_osiris_metrics,
+ [{description, "osiris metrics scraper"},
+ {mfa, {rabbit_sup, start_child,
+ [rabbit_osiris_metrics]}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+%% -rabbit_boot_step({rabbit_stream_coordinator,
+%% [{description, "stream queues coordinator"},
+%% {mfa, {rabbit_stream_coordinator, start,
+%% []}},
+%% {requires, pre_boot},
+%% {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({rabbit_event,
+ [{description, "statistics event manager"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_event]}},
+ {requires, external_infrastructure},
+ {enables, kernel_ready}]}).
+
+-rabbit_boot_step({kernel_ready,
+ [{description, "kernel ready"},
+ {requires, external_infrastructure}]}).
+
+-rabbit_boot_step({rabbit_memory_monitor,
+ [{description, "memory monitor"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_memory_monitor]}},
+ {requires, rabbit_alarm},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({guid_generator,
+ [{description, "guid generator"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_guid]}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({delegate_sup,
+ [{description, "cluster delegate"},
+ {mfa, {rabbit, boot_delegate, []}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({rabbit_node_monitor,
+ [{description, "node monitor"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_node_monitor]}},
+ {requires, [rabbit_alarm, guid_generator]},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({rabbit_epmd_monitor,
+ [{description, "epmd monitor"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_epmd_monitor]}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({rabbit_sysmon_minder,
+ [{description, "sysmon_handler supervisor"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_sysmon_minder]}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({core_initialized,
+ [{description, "core initialized"},
+ {requires, kernel_ready}]}).
+
+-rabbit_boot_step({upgrade_queues,
+ [{description, "per-vhost message store migration"},
+ {mfa, {rabbit_upgrade,
+ maybe_migrate_queues_to_per_vhost_storage,
+ []}},
+ {requires, [core_initialized]},
+ {enables, recovery}]}).
+
+-rabbit_boot_step({recovery,
+ [{description, "exchange, queue and binding recovery"},
+ {mfa, {rabbit, recover, []}},
+ {requires, [core_initialized]},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({empty_db_check,
+ [{description, "empty DB check"},
+ {mfa, {?MODULE, maybe_insert_default_data, []}},
+ {requires, recovery},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({routing_ready,
+ [{description, "message delivery logic ready"},
+ {requires, [core_initialized, recovery]}]}).
+
+-rabbit_boot_step({connection_tracking,
+ [{description, "connection tracking infrastructure"},
+ {mfa, {rabbit_connection_tracking, boot, []}},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({channel_tracking,
+ [{description, "channel tracking infrastructure"},
+ {mfa, {rabbit_channel_tracking, boot, []}},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({background_gc,
+ [{description, "background garbage collection"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [background_gc]}},
+ {requires, [core_initialized, recovery]},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({rabbit_core_metrics_gc,
+ [{description, "background core metrics garbage collection"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_core_metrics_gc]}},
+ {requires, [core_initialized, recovery]},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({rabbit_looking_glass,
+ [{description, "Looking Glass tracer and profiler"},
+ {mfa, {rabbit_looking_glass, boot, []}},
+ {requires, [core_initialized, recovery]},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({pre_flight,
+ [{description, "ready to communicate with peers and clients"},
+ {requires, [core_initialized, recovery, routing_ready]}]}).
+
+-rabbit_boot_step({cluster_name,
+ [{description, "sets cluster name if configured"},
+ {mfa, {rabbit_nodes, boot, []}},
+ {requires, pre_flight}
+ ]}).
+
+-rabbit_boot_step({direct_client,
+ [{description, "direct client"},
+ {mfa, {rabbit_direct, boot, []}},
+ {requires, pre_flight}
+ ]}).
+
+-rabbit_boot_step({notify_cluster,
+ [{description, "notifies cluster peers of our presence"},
+ {mfa, {rabbit_node_monitor, notify_node_up, []}},
+ {requires, pre_flight}]}).
+
+-rabbit_boot_step({networking,
+ [{description, "TCP and TLS listeners (backwards compatibility)"},
+ {mfa, {rabbit_log, debug, ["'networking' boot step skipped and moved to end of startup", []]}},
+ {requires, notify_cluster}]}).
+
+%%---------------------------------------------------------------------------
+
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-define(APPS, [os_mon, mnesia, rabbit_common, rabbitmq_prelaunch, ra, sysmon_handler, rabbit, osiris]).
+
+-define(ASYNC_THREADS_WARNING_THRESHOLD, 8).
+
+%% 1 minute
+-define(BOOT_START_TIMEOUT, 1 * 60 * 1000).
+%% 12 hours
+-define(BOOT_FINISH_TIMEOUT, 12 * 60 * 60 * 1000).
+%% 100 ms
+-define(BOOT_STATUS_CHECK_INTERVAL, 100).
+
+%%----------------------------------------------------------------------------
+
+-type restart_type() :: 'permanent' | 'transient' | 'temporary'.
+
+-type param() :: atom().
+-type app_name() :: atom().
+
+%%----------------------------------------------------------------------------
+
+-spec start() -> 'ok'.
+
+start() ->
+ %% start() vs. boot(): we want to throw an error in start().
+ start_it(temporary).
+
+-spec boot() -> 'ok'.
+
+boot() ->
+ %% start() vs. boot(): we want the node to exit in boot(). Because
+ %% applications are started with `transient`, any error during their
+ %% startup will abort the node.
+ start_it(transient).
+
+run_prelaunch_second_phase() ->
+ %% Finish the prelaunch phase started by the `rabbitmq_prelaunch`
+ %% application.
+ %%
+ %% The first phase was handled by the `rabbitmq_prelaunch`
+    %% application. It was started in one of the following ways:
+ %% - from an Erlang release boot script;
+ %% - from the rabbit:boot/0 or rabbit:start/0 functions.
+ %%
+ %% The `rabbitmq_prelaunch` application creates the context map from
+ %% the environment and the configuration files early during Erlang
+ %% VM startup. Once it is done, all application environments are
+ %% configured (in particular `mnesia` and `ra`).
+ %%
+ %% This second phase depends on other modules & facilities of
+ %% RabbitMQ core. That's why we need to run it now, from the
+ %% `rabbit` application start function.
+
+ %% We assert Mnesia is stopped before we run the prelaunch
+ %% phases. See `rabbit_prelaunch` for an explanation.
+ %%
+ %% This is the second assertion, just in case Mnesia is started
+ %% between the two prelaunch phases.
+ rabbit_prelaunch:assert_mnesia_is_stopped(),
+
+ %% Get the context created by `rabbitmq_prelaunch` then proceed
+ %% with all steps in this phase.
+ #{initial_pass := IsInitialPass} =
+ Context = rabbit_prelaunch:get_context(),
+
+ case IsInitialPass of
+ true ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug(
+ "== Prelaunch phase [2/2] (initial pass) ==");
+ false ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Prelaunch phase [2/2] =="),
+ ok
+ end,
+
+ %% 1. Enabled plugins file.
+ ok = rabbit_prelaunch_enabled_plugins_file:setup(Context),
+
+ %% 2. Feature flags registry.
+ ok = rabbit_prelaunch_feature_flags:setup(Context),
+
+ %% 3. Logging.
+ ok = rabbit_prelaunch_logging:setup(Context),
+
+ %% 4. Clustering.
+ ok = rabbit_prelaunch_cluster:setup(Context),
+
+ %% Start Mnesia now that everything is ready.
+ rabbit_log_prelaunch:debug("Starting Mnesia"),
+ ok = mnesia:start(),
+
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Prelaunch DONE =="),
+
+ case IsInitialPass of
+ true -> rabbit_prelaunch:initial_pass_finished();
+ false -> ok
+ end,
+ ok.
+
+start_it(StartType) ->
+ case spawn_boot_marker() of
+ {ok, Marker} ->
+ T0 = erlang:timestamp(),
+ rabbit_log:info("RabbitMQ is asked to start...", []),
+ try
+ {ok, _} = application:ensure_all_started(rabbitmq_prelaunch,
+ StartType),
+ {ok, _} = application:ensure_all_started(rabbit,
+ StartType),
+ ok = wait_for_ready_or_stopped(),
+
+ T1 = erlang:timestamp(),
+ rabbit_log_prelaunch:debug(
+ "Time to start RabbitMQ: ~p µs",
+ [timer:now_diff(T1, T0)]),
+ stop_boot_marker(Marker),
+ ok
+ catch
+ error:{badmatch, Error}:_ ->
+ stop_boot_marker(Marker),
+ case StartType of
+ temporary -> throw(Error);
+ _ -> exit(Error)
+ end
+ end;
+ {already_booting, Marker} ->
+ stop_boot_marker(Marker),
+ ok
+ end.
+
+wait_for_ready_or_stopped() ->
+ ok = rabbit_boot_state:wait_for(ready, ?BOOT_FINISH_TIMEOUT),
+ case rabbit_boot_state:get() of
+ ready ->
+ ok;
+ _ ->
+ ok = rabbit_boot_state:wait_for(stopped, ?BOOT_FINISH_TIMEOUT),
+ rabbit_prelaunch:get_stop_reason()
+ end.
+
+spawn_boot_marker() ->
+ %% Compatibility with older RabbitMQ versions:
+ %% We register a process doing nothing to indicate that RabbitMQ is
+ %% booting. This is checked by `is_booting(Node)` on a remote node.
+ Marker = spawn_link(fun() -> receive stop -> ok end end),
+ case catch register(rabbit_boot, Marker) of
+ true -> {ok, Marker};
+ _ -> {already_booting, Marker}
+ end.
+
+stop_boot_marker(Marker) ->
+ unlink(Marker),
+ Marker ! stop,
+ ok.
+
+-spec stop() -> 'ok'.
+
+stop() ->
+ case wait_for_ready_or_stopped() of
+ ok ->
+ case rabbit_boot_state:get() of
+ ready ->
+ Product = product_name(),
+ rabbit_log:info("~s is asked to stop...", [Product]),
+ do_stop(),
+ rabbit_log:info(
+ "Successfully stopped ~s and its dependencies",
+ [Product]),
+ ok;
+ stopped ->
+ ok
+ end;
+ _ ->
+ ok
+ end.
+
+do_stop() ->
+ Apps0 = ?APPS ++ rabbit_plugins:active(),
+ %% We ensure that Mnesia is stopped last (or more exactly, after rabbit).
+ Apps1 = app_utils:app_dependency_order(Apps0, true) -- [mnesia],
+ Apps = [mnesia | Apps1],
+ %% this will also perform unregistration with the peer discovery backend
+ %% as needed
+ stop_apps(Apps).
+
+-spec stop_and_halt() -> no_return().
+
+stop_and_halt() ->
+ try
+ stop()
+ catch Type:Reason ->
+ rabbit_log:error(
+ "Error trying to stop ~s: ~p:~p",
+ [product_name(), Type, Reason]),
+ error({Type, Reason})
+ after
+ %% Enclose all the logging in the try block.
+ %% init:stop() will be called regardless of any errors.
+ try
+ AppsLeft = [ A || {A, _, _} <- application:which_applications() ],
+ rabbit_log:info(
+ lists:flatten(["Halting Erlang VM with the following applications:~n",
+ [" ~p~n" || _ <- AppsLeft]]),
+ AppsLeft),
+ %% Also duplicate this information to stderr, so console where
+ %% foreground broker was running (or systemd journal) will
+ %% contain information about graceful termination.
+ io:format(standard_error, "Gracefully halting Erlang VM~n", [])
+ after
+ init:stop()
+ end
+ end,
+ ok.
+
+-spec start_apps([app_name()]) -> 'ok'.
+
+start_apps(Apps) ->
+ start_apps(Apps, #{}).
+
+-spec start_apps([app_name()],
+ #{app_name() => restart_type()}) -> 'ok'.
+
+%% TODO: start_apps/2 is now specific to plugins. This function
+%% should be moved over to `rabbit_plugins`, along with stop_apps/1, once
+%% the latter stops using app_utils as well.
+
+start_apps(Apps, RestartTypes) ->
+ false = lists:member(rabbit, Apps), %% Assertion.
+ %% We need to load all applications involved in order to be able to
+ %% find new feature flags.
+ app_utils:load_applications(Apps),
+ ok = rabbit_feature_flags:refresh_feature_flags_after_app_load(Apps),
+ rabbit_prelaunch_conf:decrypt_config(Apps),
+ lists:foreach(
+ fun(App) ->
+ RestartType = maps:get(App, RestartTypes, temporary),
+ ok = rabbit_boot_steps:run_boot_steps([App]),
+ case application:ensure_all_started(App, RestartType) of
+ {ok, _} -> ok;
+ {error, Reason} -> throw({could_not_start, App, Reason})
+ end
+ end, Apps).
+
+-spec stop_apps([app_name()]) -> 'ok'.
+
+stop_apps([]) ->
+ ok;
+stop_apps(Apps) ->
+ rabbit_log:info(
+ lists:flatten(["Stopping ~s applications and their dependencies in the following order:~n",
+ [" ~p~n" || _ <- Apps]]),
+ [product_name() | lists:reverse(Apps)]),
+ ok = app_utils:stop_applications(
+ Apps, handle_app_error(error_during_shutdown)),
+ case lists:member(rabbit, Apps) of
+ %% plugin deactivation
+ false -> rabbit_boot_steps:run_cleanup_steps(Apps);
+ true -> ok %% it's all going anyway
+ end,
+ ok.
+
+-spec handle_app_error(_) -> fun((_, _) -> no_return()).
+handle_app_error(Term) ->
+ fun(App, {bad_return, {_MFA, {'EXIT', ExitReason}}}) ->
+ throw({Term, App, ExitReason});
+ (App, Reason) ->
+ throw({Term, App, Reason})
+ end.
+
+is_booting() -> is_booting(node()).
+
+is_booting(Node) when Node =:= node() ->
+ case rabbit_boot_state:get() of
+ booting -> true;
+ _ -> false
+ end;
+is_booting(Node) ->
+ case rpc:call(Node, rabbit, is_booting, []) of
+ {badrpc, _} = Err -> Err;
+ Ret -> Ret
+ end.
+
+
+-spec await_startup() -> 'ok' | {'error', 'timeout'}.
+
+await_startup() ->
+ await_startup(node(), false).
+
+-spec await_startup(node() | non_neg_integer()) -> 'ok' | {'error', 'timeout'}.
+
+await_startup(Node) when is_atom(Node) ->
+ await_startup(Node, false);
+await_startup(Timeout) when is_integer(Timeout) ->
+ await_startup(node(), false, Timeout).
+
+-spec await_startup(node(), boolean()) -> 'ok' | {'error', 'timeout'}.
+
+await_startup(Node, PrintProgressReports) ->
+ case is_booting(Node) of
+ true -> wait_for_boot_to_finish(Node, PrintProgressReports);
+ false ->
+ case is_running(Node) of
+ true -> ok;
+ false -> wait_for_boot_to_start(Node),
+ wait_for_boot_to_finish(Node, PrintProgressReports)
+ end
+ end.
+
+-spec await_startup(node(), boolean(), non_neg_integer()) -> 'ok' | {'error', 'timeout'}.
+
+await_startup(Node, PrintProgressReports, Timeout) ->
+ case is_booting(Node) of
+ true -> wait_for_boot_to_finish(Node, PrintProgressReports, Timeout);
+ false ->
+ case is_running(Node) of
+ true -> ok;
+ false -> wait_for_boot_to_start(Node, Timeout),
+ wait_for_boot_to_finish(Node, PrintProgressReports, Timeout)
+ end
+ end.
+
+wait_for_boot_to_start(Node) ->
+ wait_for_boot_to_start(Node, ?BOOT_START_TIMEOUT).
+
+wait_for_boot_to_start(Node, infinity) ->
+ %% This assumes that 100K iterations is close enough to "infinity".
+ %% Now that's deep.
+ do_wait_for_boot_to_start(Node, 100000);
+wait_for_boot_to_start(Node, Timeout) ->
+ Iterations = Timeout div ?BOOT_STATUS_CHECK_INTERVAL,
+ do_wait_for_boot_to_start(Node, Iterations).
+
+do_wait_for_boot_to_start(_Node, IterationsLeft) when IterationsLeft =< 0 ->
+ {error, timeout};
+do_wait_for_boot_to_start(Node, IterationsLeft) ->
+ case is_booting(Node) of
+ false ->
+ timer:sleep(?BOOT_STATUS_CHECK_INTERVAL),
+ do_wait_for_boot_to_start(Node, IterationsLeft - 1);
+ {badrpc, _} = Err ->
+ Err;
+ true ->
+ ok
+ end.
+
+wait_for_boot_to_finish(Node, PrintProgressReports) ->
+ wait_for_boot_to_finish(Node, PrintProgressReports, ?BOOT_FINISH_TIMEOUT).
+
+wait_for_boot_to_finish(Node, PrintProgressReports, infinity) ->
+ %% This assumes that 100K iterations is close enough to "infinity".
+ %% Now that's deep.
+ do_wait_for_boot_to_finish(Node, PrintProgressReports, 100000);
+wait_for_boot_to_finish(Node, PrintProgressReports, Timeout) ->
+ Iterations = Timeout div ?BOOT_STATUS_CHECK_INTERVAL,
+ do_wait_for_boot_to_finish(Node, PrintProgressReports, Iterations).
+
+do_wait_for_boot_to_finish(_Node, _PrintProgressReports, IterationsLeft) when IterationsLeft =< 0 ->
+ {error, timeout};
+do_wait_for_boot_to_finish(Node, PrintProgressReports, IterationsLeft) ->
+ case is_booting(Node) of
+ false ->
+ %% We don't want badrpc error to be interpreted as false,
+ %% so we don't call rabbit:is_running(Node)
+ case rpc:call(Node, rabbit, is_running, []) of
+ true -> ok;
+ false -> {error, rabbit_is_not_running};
+ {badrpc, _} = Err -> Err
+ end;
+ {badrpc, _} = Err ->
+ Err;
+ true ->
+ maybe_print_boot_progress(PrintProgressReports, IterationsLeft),
+ timer:sleep(?BOOT_STATUS_CHECK_INTERVAL),
+ do_wait_for_boot_to_finish(Node, PrintProgressReports, IterationsLeft - 1)
+ end.
+
+maybe_print_boot_progress(false = _PrintProgressReports, _IterationsLeft) ->
+ ok;
+maybe_print_boot_progress(true, IterationsLeft) ->
+ case IterationsLeft rem 100 of
+ %% This will be printed on the CLI command end to illustrate some
+ %% progress.
+ 0 -> io:format("Still booting, will check again in 10 seconds...~n");
+ _ -> ok
+ end.
+
+-spec status
+ () -> [{pid, integer()} |
+ {running_applications, [{atom(), string(), string()}]} |
+ {os, {atom(), atom()}} |
+ {erlang_version, string()} |
+ {memory, any()}].
+
+status() ->
+ Version = base_product_version(),
+ S1 = [{pid, list_to_integer(os:getpid())},
+ %% The timeout value used is twice that of gen_server:call/2.
+ {running_applications, rabbit_misc:which_applications()},
+ {os, os:type()},
+ {rabbitmq_version, Version},
+ {erlang_version, erlang:system_info(system_version)},
+ {memory, rabbit_vm:memory()},
+ {alarms, alarms()},
+ {is_under_maintenance, rabbit_maintenance:is_being_drained_local_read(node())},
+ {listeners, listeners()},
+ {vm_memory_calculation_strategy, vm_memory_monitor:get_memory_calculation_strategy()}],
+ S2 = rabbit_misc:filter_exit_map(
+ fun ({Key, {M, F, A}}) -> {Key, erlang:apply(M, F, A)} end,
+ [{vm_memory_high_watermark, {vm_memory_monitor,
+ get_vm_memory_high_watermark, []}},
+ {vm_memory_limit, {vm_memory_monitor,
+ get_memory_limit, []}},
+ {disk_free_limit, {rabbit_disk_monitor,
+ get_disk_free_limit, []}},
+ {disk_free, {rabbit_disk_monitor,
+ get_disk_free, []}}]),
+ S3 = rabbit_misc:with_exit_handler(
+ fun () -> [] end,
+ fun () -> [{file_descriptors, file_handle_cache:info()}] end),
+ S4 = [{processes, [{limit, erlang:system_info(process_limit)},
+ {used, erlang:system_info(process_count)}]},
+ {run_queue, erlang:statistics(run_queue)},
+ {uptime, begin
+ {T,_} = erlang:statistics(wall_clock),
+ T div 1000
+ end},
+ {kernel, {net_ticktime, net_kernel:get_net_ticktime()}}],
+ S5 = [{active_plugins, rabbit_plugins:active()},
+ {enabled_plugin_file, rabbit_plugins:enabled_plugins_file()}],
+ S6 = [{config_files, config_files()},
+ {log_files, log_locations()},
+ {data_directory, rabbit_mnesia:dir()},
+ {raft_data_directory, ra_env:data_dir()}],
+ Totals = case is_running() of
+ true ->
+ [{virtual_host_count, rabbit_vhost:count()},
+ {connection_count,
+ length(rabbit_networking:connections_local())},
+ {queue_count, total_queue_count()}];
+ false ->
+ []
+ end,
+ S7 = [{totals, Totals}],
+ S8 = lists:filter(
+ fun
+ ({product_base_name, _}) -> true;
+ ({product_base_version, _}) -> true;
+ ({product_name, _}) -> true;
+ ({product_version, _}) -> true;
+ (_) -> false
+ end,
+ maps:to_list(product_info())),
+ S1 ++ S2 ++ S3 ++ S4 ++ S5 ++ S6 ++ S7 ++ S8.
+
+alarms() ->
+ Alarms = rabbit_misc:with_exit_handler(rabbit_misc:const([]),
+ fun rabbit_alarm:get_alarms/0),
+ N = node(),
+ %% [{{resource_limit,memory,rabbit@mercurio},[]}]
+ [{resource_limit, Limit, Node} || {{resource_limit, Limit, Node}, _} <- Alarms, Node =:= N].
+
+listeners() ->
+ Listeners = try
+ rabbit_networking:active_listeners()
+ catch
+ exit:{aborted, _} -> []
+ end,
+ [L || L = #listener{node = Node} <- Listeners, Node =:= node()].
+
+total_queue_count() ->
+ lists:foldl(fun (VirtualHost, Acc) ->
+ Acc + rabbit_amqqueue:count(VirtualHost)
+ end,
+ 0, rabbit_vhost:list_names()).
+
+-spec is_running() -> boolean().
+
+is_running() -> is_running(node()).
+
+-spec is_running(node()) -> boolean().
+
+is_running(Node) when Node =:= node() ->
+ case rabbit_boot_state:get() of
+ ready -> true;
+ _ -> false
+ end;
+is_running(Node) ->
+ case rpc:call(Node, rabbit, is_running, []) of
+ true -> true;
+ _ -> false
+ end.
+
+is_booted() -> is_booted(node()).
+
+is_booted(Node) ->
+ case is_booting(Node) of
+ false ->
+ is_running(Node);
+ _ -> false
+ end.
+
+-spec environment() -> [{param(), term()}].
+
+environment() ->
+ %% The timeout value is twice that of gen_server:call/2.
+ [{A, environment(A)} ||
+ {A, _, _} <- lists:keysort(1, application:which_applications(10000))].
+
+environment(App) ->
+ Ignore = [default_pass, included_applications],
+ lists:keysort(1, [P || P = {K, _} <- application:get_all_env(App),
+ not lists:member(K, Ignore)]).
+
+-spec rotate_logs() -> rabbit_types:ok_or_error(any()).
+
+rotate_logs() ->
+ rabbit_lager:fold_sinks(
+ fun
+ (_, [], Acc) ->
+ Acc;
+ (SinkName, FileNames, Acc) ->
+ lager:log(SinkName, info, self(),
+ "Log file rotation forced", []),
+ %% FIXME: We use an internal message, understood by
+ %% lager_file_backend. We should use a proper API, when
+ %% it's added to Lager.
+ %%
+ %% FIXME: This call is effectively asynchronous: at the
+            %% end of this function, we can't guarantee that the rotation
+            %% has completed.
+ [ok = gen_event:call(SinkName,
+ {lager_file_backend, FileName},
+ rotate,
+ infinity) || FileName <- FileNames],
+ lager:log(SinkName, info, self(),
+ "Log file re-opened after forced rotation", []),
+ Acc
+ end, ok).
+
+%%--------------------------------------------------------------------
+
+-spec start('normal',[]) ->
+ {'error',
+ {'erlang_version_too_old',
+ {'found',string(),string()},
+ {'required',string(),string()}}} |
+ {'ok',pid()}.
+
+start(normal, []) ->
+    %% Reset the boot state and clear the stop reason again (this was
+    %% already done in rabbitmq_prelaunch).
+ %%
+ %% This is important if the previous startup attempt failed after
+ %% rabbitmq_prelaunch was started and the application is still
+ %% running.
+ rabbit_boot_state:set(booting),
+ rabbit_prelaunch:clear_stop_reason(),
+
+ try
+ run_prelaunch_second_phase(),
+
+ ProductInfo = product_info(),
+ case ProductInfo of
+ #{product_overridden := true,
+ product_base_name := BaseName,
+ product_base_version := BaseVersion} ->
+ rabbit_log:info("~n Starting ~s ~s on Erlang ~s~n Based on ~s ~s~n ~s~n ~s~n",
+ [product_name(), product_version(), rabbit_misc:otp_release(),
+ BaseName, BaseVersion,
+ ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]);
+ _ ->
+ rabbit_log:info("~n Starting ~s ~s on Erlang ~s~n ~s~n ~s~n",
+ [product_name(), product_version(), rabbit_misc:otp_release(),
+ ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE])
+ end,
+ log_motd(),
+ {ok, SupPid} = rabbit_sup:start_link(),
+
+ %% Compatibility with older RabbitMQ versions + required by
+ %% rabbit_node_monitor:notify_node_up/0:
+ %%
+ %% We register the app process under the name `rabbit`. This is
+ %% checked by `is_running(Node)` on a remote node. The process
+        %% is also monitored by rabbit_node_monitor.
+ %%
+ %% The process name must be registered *before* running the boot
+ %% steps: that's when rabbit_node_monitor will set the process
+ %% monitor up.
+ %%
+ %% Note that plugins were not taken care of at this point
+ %% either.
+ rabbit_log_prelaunch:debug(
+ "Register `rabbit` process (~p) for rabbit_node_monitor",
+ [self()]),
+ true = register(rabbit, self()),
+
+ print_banner(),
+ log_banner(),
+ warn_if_kernel_config_dubious(),
+ warn_if_disc_io_options_dubious(),
+ %% We run `rabbit` boot steps only for now. Plugins boot steps
+ %% will be executed as part of the postlaunch phase after they
+ %% are started.
+ rabbit_boot_steps:run_boot_steps([rabbit]),
+ run_postlaunch_phase(),
+ {ok, SupPid}
+ catch
+ throw:{error, _} = Error ->
+ mnesia:stop(),
+ rabbit_prelaunch_errors:log_error(Error),
+ rabbit_prelaunch:set_stop_reason(Error),
+ rabbit_boot_state:set(stopped),
+ Error;
+ Class:Exception:Stacktrace ->
+ mnesia:stop(),
+ rabbit_prelaunch_errors:log_exception(
+ Class, Exception, Stacktrace),
+ Error = {error, Exception},
+ rabbit_prelaunch:set_stop_reason(Error),
+ rabbit_boot_state:set(stopped),
+ Error
+ end.
+
+run_postlaunch_phase() ->
+ spawn(fun() -> do_run_postlaunch_phase() end).
+
+do_run_postlaunch_phase() ->
+ %% Once RabbitMQ itself is started, we need to run a few more steps,
+ %% in particular start plugins.
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Postlaunch phase =="),
+
+ try
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Plugins =="),
+
+ rabbit_log_prelaunch:debug("Setting plugins up"),
+ %% `Plugins` contains all the enabled plugins, plus their
+ %% dependencies. The order is important: dependencies appear
+        %% before the plugins which depend on them.
+ Plugins = rabbit_plugins:setup(),
+ rabbit_log_prelaunch:debug(
+ "Starting the following plugins: ~p", [Plugins]),
+ %% We can load all plugins and refresh their feature flags at
+ %% once, because it does not involve running code from the
+ %% plugins.
+ app_utils:load_applications(Plugins),
+ ok = rabbit_feature_flags:refresh_feature_flags_after_app_load(
+ Plugins),
+ %% However, we want to run their boot steps and actually start
+ %% them one by one, to ensure a dependency is fully started
+ %% before a plugin which depends on it gets a chance to start.
+ lists:foreach(
+ fun(Plugin) ->
+ ok = rabbit_boot_steps:run_boot_steps([Plugin]),
+ case application:ensure_all_started(Plugin) of
+ {ok, _} -> ok;
+ Error -> throw(Error)
+ end
+ end, Plugins),
+
+ %% Successful boot resets node maintenance state.
+ rabbit_log_prelaunch:info("Resetting node maintenance status"),
+ _ = rabbit_maintenance:unmark_as_being_drained(),
+
+        %% Import definitions after all plugins have been enabled,
+ %% see rabbitmq/rabbitmq-server#2384
+ case rabbit_definitions:maybe_load_definitions() of
+ ok -> ok;
+ DefLoadError -> throw(DefLoadError)
+ end,
+
+ %% Start listeners after all plugins have been enabled,
+ %% see rabbitmq/rabbitmq-server#2405.
+ rabbit_log_prelaunch:info(
+ "Ready to start client connection listeners"),
+ ok = rabbit_networking:boot(),
+
+ %% The node is ready: mark it as such and log it.
+ %% NOTE: PLEASE DO NOT ADD CRITICAL NODE STARTUP CODE AFTER THIS.
+ ok = rabbit_lager:broker_is_started(),
+ ok = log_broker_started(
+ rabbit_plugins:strictly_plugins(rabbit_plugins:active())),
+
+ rabbit_log_prelaunch:debug("Marking ~s as running", [product_name()]),
+ rabbit_boot_state:set(ready)
+ catch
+ throw:{error, _} = Error ->
+ rabbit_prelaunch_errors:log_error(Error),
+ rabbit_prelaunch:set_stop_reason(Error),
+ do_stop();
+ Class:Exception:Stacktrace ->
+ rabbit_prelaunch_errors:log_exception(
+ Class, Exception, Stacktrace),
+ Error = {error, Exception},
+ rabbit_prelaunch:set_stop_reason(Error),
+ do_stop()
+ end.
+
+prep_stop(State) ->
+ rabbit_boot_state:set(stopping),
+ rabbit_peer_discovery:maybe_unregister(),
+ State.
+
+-spec stop(_) -> 'ok'.
+
+stop(State) ->
+ ok = rabbit_alarm:stop(),
+ ok = case rabbit_mnesia:is_clustered() of
+ true -> ok;
+ false -> rabbit_table:clear_ram_only_tables()
+ end,
+ case State of
+ [] -> rabbit_prelaunch:set_stop_reason(normal);
+ _ -> rabbit_prelaunch:set_stop_reason(State)
+ end,
+ rabbit_boot_state:set(stopped),
+ ok.
+
+%%---------------------------------------------------------------------------
+%% boot step functions
+
+-spec boot_delegate() -> 'ok'.
+
+boot_delegate() ->
+ {ok, Count} = application:get_env(rabbit, delegate_count),
+ rabbit_sup:start_supervisor_child(delegate_sup, [Count]).
+
+-spec recover() -> 'ok'.
+
+recover() ->
+ ok = rabbit_policy:recover(),
+ ok = rabbit_vhost:recover(),
+ ok = lager_exchange_backend:maybe_init_exchange().
+
+-spec maybe_insert_default_data() -> 'ok'.
+
+maybe_insert_default_data() ->
+ NoDefsToImport = not rabbit_definitions:has_configured_definitions_to_load(),
+ case rabbit_table:needs_default_data() andalso NoDefsToImport of
+ true ->
+ rabbit_log:info("Will seed default virtual host and user..."),
+ insert_default_data();
+ false ->
+ rabbit_log:info("Will not seed default virtual host and user: have definitions to load..."),
+ ok
+ end.
+
+insert_default_data() ->
+ {ok, DefaultUser} = application:get_env(default_user),
+ {ok, DefaultPass} = application:get_env(default_pass),
+ {ok, DefaultTags} = application:get_env(default_user_tags),
+ {ok, DefaultVHost} = application:get_env(default_vhost),
+ {ok, [DefaultConfigurePerm, DefaultWritePerm, DefaultReadPerm]} =
+ application:get_env(default_permissions),
+
+ DefaultUserBin = rabbit_data_coercion:to_binary(DefaultUser),
+ DefaultPassBin = rabbit_data_coercion:to_binary(DefaultPass),
+ DefaultVHostBin = rabbit_data_coercion:to_binary(DefaultVHost),
+ DefaultConfigurePermBin = rabbit_data_coercion:to_binary(DefaultConfigurePerm),
+ DefaultWritePermBin = rabbit_data_coercion:to_binary(DefaultWritePerm),
+ DefaultReadPermBin = rabbit_data_coercion:to_binary(DefaultReadPerm),
+
+ ok = rabbit_vhost:add(DefaultVHostBin, <<"Default virtual host">>, [], ?INTERNAL_USER),
+ ok = lager_exchange_backend:maybe_init_exchange(),
+ ok = rabbit_auth_backend_internal:add_user(
+ DefaultUserBin,
+ DefaultPassBin,
+ ?INTERNAL_USER
+ ),
+ ok = rabbit_auth_backend_internal:set_tags(DefaultUserBin, DefaultTags,
+ ?INTERNAL_USER),
+ ok = rabbit_auth_backend_internal:set_permissions(DefaultUserBin,
+ DefaultVHostBin,
+ DefaultConfigurePermBin,
+ DefaultWritePermBin,
+ DefaultReadPermBin,
+ ?INTERNAL_USER),
+ ok.
+
+%%---------------------------------------------------------------------------
+%% logging
+
+-spec log_locations() -> [rabbit_lager:log_location()].
+log_locations() ->
+ rabbit_lager:log_locations().
+
+-spec config_locations() -> [rabbit_config:config_location()].
+config_locations() ->
+ rabbit_config:config_files().
+
+-spec force_event_refresh(reference()) -> 'ok'.
+
+% Note: https://www.pivotaltracker.com/story/show/166962656
+% This event is necessary for the stats timer to be initialized with
+% the correct values once the management agent has started
+force_event_refresh(Ref) ->
+ % direct connections, e.g. MQTT, STOMP
+ ok = rabbit_direct:force_event_refresh(Ref),
+ % AMQP connections
+ ok = rabbit_networking:force_connection_event_refresh(Ref),
+ % "external" connections, which are not handled by the "AMQP core",
+ % e.g. connections to the stream plugin
+ ok = rabbit_networking:force_non_amqp_connection_event_refresh(Ref),
+ ok = rabbit_channel:force_event_refresh(Ref),
+ ok = rabbit_amqqueue:force_event_refresh(Ref).
+
+%%---------------------------------------------------------------------------
+%% misc
+
+log_broker_started(Plugins) ->
+ PluginList = iolist_to_binary([rabbit_misc:format(" * ~s~n", [P])
+ || P <- Plugins]),
+ Message = string:strip(rabbit_misc:format(
+ "Server startup complete; ~b plugins started.~n~s",
+ [length(Plugins), PluginList]), right, $\n),
+ rabbit_log:info(Message),
+ io:format(" completed with ~p plugins.~n", [length(Plugins)]).
+
+-define(RABBIT_TEXT_LOGO,
+ "~n ## ## ~s ~s"
+ "~n ## ##"
+ "~n ########## ~s"
+ "~n ###### ##"
+ "~n ########## ~s").
+-define(FG8_START, "\033[38;5;202m").
+-define(BG8_START, "\033[48;5;202m").
+-define(FG32_START, "\033[38;2;255;102;0m").
+-define(BG32_START, "\033[48;2;255;102;0m").
+-define(C_END, "\033[0m").
+-define(RABBIT_8BITCOLOR_LOGO,
+ "~n " ?BG8_START " " ?C_END " " ?BG8_START " " ?C_END " \033[1m" ?FG8_START "~s" ?C_END " ~s"
+ "~n " ?BG8_START " " ?C_END " " ?BG8_START " " ?C_END
+ "~n " ?BG8_START " " ?C_END " ~s"
+ "~n " ?BG8_START " " ?C_END " " ?BG8_START " " ?C_END
+ "~n " ?BG8_START " " ?C_END " ~s").
+-define(RABBIT_32BITCOLOR_LOGO,
+ "~n " ?BG32_START " " ?C_END " " ?BG32_START " " ?C_END " \033[1m" ?FG32_START "~s" ?C_END " ~s"
+ "~n " ?BG32_START " " ?C_END " " ?BG32_START " " ?C_END
+ "~n " ?BG32_START " " ?C_END " ~s"
+ "~n " ?BG32_START " " ?C_END " " ?BG32_START " " ?C_END
+ "~n " ?BG32_START " " ?C_END " ~s").
+
+print_banner() ->
+ Product = product_name(),
+ Version = product_version(),
+ LineListFormatter = fun (Placeholder, [_ | Tail] = LL) ->
+ LF = lists:flatten([Placeholder || _ <- lists:seq(1, length(Tail))]),
+ {LF, LL};
+ (_, []) ->
+ {"", ["(none)"]}
+ end,
+ Logo = case rabbit_prelaunch:get_context() of
+ %% We use the colored logo only when running the
+ %% interactive shell and when colors are supported.
+ %%
+ %% Basically it means it will be used on Unix when
+ %% running "make run-broker" and that's about it.
+ #{os_type := {unix, darwin},
+ interactive_shell := true,
+ output_supports_colors := true} -> ?RABBIT_8BITCOLOR_LOGO;
+ #{interactive_shell := true,
+ output_supports_colors := true} -> ?RABBIT_32BITCOLOR_LOGO;
+ _ -> ?RABBIT_TEXT_LOGO
+ end,
+    %% padded lists of log and config file locations
+ {LogFmt, LogLocations} = LineListFormatter("~n ~ts", log_locations()),
+ {CfgFmt, CfgLocations} = LineListFormatter("~n ~ts", config_locations()),
+ {MOTDFormat, MOTDArgs} = case motd() of
+ undefined ->
+ {"", []};
+ MOTD ->
+ Lines = string:split(MOTD, "\n", all),
+ Padded = [case Line of
+ <<>> -> "\n";
+ _ -> [" ", Line, "\n"]
+ end
+ || Line <- Lines],
+ {"~n~ts", [Padded]}
+ end,
+ io:format(Logo ++
+ "~n" ++
+ MOTDFormat ++
+ "~n Doc guides: https://rabbitmq.com/documentation.html"
+ "~n Support: https://rabbitmq.com/contact.html"
+ "~n Tutorials: https://rabbitmq.com/getstarted.html"
+ "~n Monitoring: https://rabbitmq.com/monitoring.html"
+ "~n"
+ "~n Logs: ~ts" ++ LogFmt ++ "~n"
+ "~n Config file(s): ~ts" ++ CfgFmt ++ "~n"
+ "~n Starting broker...",
+ [Product, Version, ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE] ++
+ MOTDArgs ++
+ LogLocations ++
+ CfgLocations).
+
+log_motd() ->
+ case motd() of
+ undefined ->
+ ok;
+ MOTD ->
+ Lines = string:split(MOTD, "\n", all),
+ Padded = [case Line of
+ <<>> -> "\n";
+ _ -> [" ", Line, "\n"]
+ end
+ || Line <- Lines],
+ rabbit_log:info("~n~ts", [string:trim(Padded, trailing, [$\r, $\n])])
+ end.
+
+log_banner() ->
+ {FirstLog, OtherLogs} = case log_locations() of
+ [Head | Tail] ->
+ {Head, [{"", F} || F <- Tail]};
+ [] ->
+ {"(none)", []}
+ end,
+ Settings = [{"node", node()},
+ {"home dir", home_dir()},
+ {"config file(s)", config_files()},
+ {"cookie hash", rabbit_nodes:cookie_hash()},
+ {"log(s)", FirstLog}] ++
+ OtherLogs ++
+ [{"database dir", rabbit_mnesia:dir()}],
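+    %% Align all values one column past the longest settings key.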
+ DescrLen = 1 + lists:max([length(K) || {K, _V} <- Settings]),
+ Format = fun (K, V) ->
+ rabbit_misc:format(
+ " ~-" ++ integer_to_list(DescrLen) ++ "s: ~ts~n", [K, V])
+ end,
+ Banner = string:strip(lists:flatten(
+ [case S of
+ {"config file(s)" = K, []} ->
+ Format(K, "(none)");
+ {"config file(s)" = K, [V0 | Vs]} ->
+ [Format(K, V0) | [Format("", V) || V <- Vs]];
+ {K, V} ->
+ Format(K, V)
+ end || S <- Settings]), right, $\n),
+ rabbit_log:info("~n~ts", [Banner]).
+
+warn_if_kernel_config_dubious() ->
+ case os:type() of
+ {win32, _} ->
+ ok;
+ _ ->
+ case erlang:system_info(kernel_poll) of
+ true -> ok;
+ false -> rabbit_log:warning(
+ "Kernel poll (epoll, kqueue, etc) is disabled. Throughput "
+ "and CPU utilization may worsen.~n")
+ end
+ end,
+ AsyncThreads = erlang:system_info(thread_pool_size),
+ case AsyncThreads < ?ASYNC_THREADS_WARNING_THRESHOLD of
+ true -> rabbit_log:warning(
+ "Erlang VM is running with ~b I/O threads, "
+ "file I/O performance may worsen~n", [AsyncThreads]);
+ false -> ok
+ end,
+ IDCOpts = case application:get_env(kernel, inet_default_connect_options) of
+ undefined -> [];
+ {ok, Val} -> Val
+ end,
+ case proplists:get_value(nodelay, IDCOpts, false) of
+ false -> rabbit_log:warning("Nagle's algorithm is enabled for sockets, "
+ "network I/O latency will be higher~n");
+ true -> ok
+ end.
+
+warn_if_disc_io_options_dubious() ->
+ %% if these values are not set, it doesn't matter since
+ %% rabbit_variable_queue will pick up the values defined in the
+ %% IO_BATCH_SIZE and CREDIT_DISC_BOUND constants.
+ CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound,
+ undefined),
+ IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size,
+ undefined),
+ case catch validate_msg_store_io_batch_size_and_credit_disc_bound(
+ CreditDiscBound, IoBatchSize) of
+ ok -> ok;
+ {error, {Reason, Vars}} ->
+ rabbit_log:warning(Reason, Vars)
+ end.
+
+validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound,
+ IoBatchSize) ->
+ case IoBatchSize of
+ undefined ->
+ ok;
+ IoBatchSize when is_integer(IoBatchSize) ->
+ if IoBatchSize < ?IO_BATCH_SIZE ->
+ throw({error,
+ {"io_batch_size of ~b lower than recommended value ~b, "
+ "paging performance may worsen~n",
+ [IoBatchSize, ?IO_BATCH_SIZE]}});
+ true ->
+ ok
+ end;
+ IoBatchSize ->
+ throw({error,
+                   {"io_batch_size should be an integer, but ~p given",
+ [IoBatchSize]}})
+ end,
+
+ %% CreditDiscBound = {InitialCredit, MoreCreditAfter}
+ {RIC, RMCA} = ?CREDIT_DISC_BOUND,
+ case CreditDiscBound of
+ undefined ->
+ ok;
+ {IC, MCA} when is_integer(IC), is_integer(MCA) ->
+ if IC < RIC; MCA < RMCA ->
+ throw({error,
+                      {"msg_store_credit_disc_bound {~b, ~b} lower than "
+ "recommended value {~b, ~b},"
+ " paging performance may worsen~n",
+ [IC, MCA, RIC, RMCA]}});
+ true ->
+ ok
+ end;
+ {IC, MCA} ->
+ throw({error,
+ {"both msg_store_credit_disc_bound values should be integers, but ~p given",
+ [{IC, MCA}]}});
+ CreditDiscBound ->
+ throw({error,
+ {"invalid msg_store_credit_disc_bound value given: ~p",
+ [CreditDiscBound]}})
+ end,
+
+ case {CreditDiscBound, IoBatchSize} of
+ {undefined, undefined} ->
+ ok;
+ {_CDB, undefined} ->
+ ok;
+ {undefined, _IBS} ->
+ ok;
+ {{InitialCredit, _MCA}, IoBatchSize} ->
+ if IoBatchSize < InitialCredit ->
+ throw(
+ {error,
+ {"msg_store_io_batch_size ~b should be bigger than the initial "
+ "credit value from msg_store_credit_disc_bound ~b,"
+ " paging performance may worsen~n",
+ [IoBatchSize, InitialCredit]}});
+ true ->
+ ok
+ end
+ end.
+
+-spec product_name() -> string().
+
+product_name() ->
+ case product_info() of
+ #{product_name := ProductName} -> ProductName;
+ #{product_base_name := BaseName} -> BaseName
+ end.
+
+-spec product_version() -> string().
+
+product_version() ->
+ case product_info() of
+ #{product_version := ProductVersion} -> ProductVersion;
+ #{product_base_version := BaseVersion} -> BaseVersion
+ end.
+
+-spec product_info() -> #{product_base_name := string(),
+ product_base_version := string(),
+ product_overridden := boolean(),
+ product_name => string(),
+ product_version => string(),
+ otp_release := string()}.
+
+product_info() ->
+ PTKey = {?MODULE, product},
+ try
+ %% The value is cached the first time to avoid calling the
+ %% application master many times just for that.
+ persistent_term:get(PTKey)
+ catch
+ error:badarg ->
+ BaseName = base_product_name(),
+ BaseVersion = base_product_version(),
+ Info0 = #{product_base_name => BaseName,
+ product_base_version => BaseVersion,
+ otp_release => rabbit_misc:otp_release()},
+
+ {NameFromEnv, VersionFromEnv} =
+ case rabbit_prelaunch:get_context() of
+ #{product_name := NFE,
+ product_version := VFE} -> {NFE, VFE};
+ _ -> {undefined, undefined}
+ end,
+
+ Info1 = case NameFromEnv of
+ undefined ->
+ NameFromApp = string_from_app_env(
+ product_name,
+ undefined),
+ case NameFromApp of
+ undefined ->
+ Info0;
+ _ ->
+ Info0#{product_name => NameFromApp,
+ product_overridden => true}
+ end;
+ _ ->
+ Info0#{product_name => NameFromEnv,
+ product_overridden => true}
+ end,
+
+ Info2 = case VersionFromEnv of
+ undefined ->
+ VersionFromApp = string_from_app_env(
+ product_version,
+ undefined),
+ case VersionFromApp of
+ undefined ->
+ Info1;
+ _ ->
+ Info1#{product_version => VersionFromApp,
+ product_overridden => true}
+ end;
+ _ ->
+ Info1#{product_version => VersionFromEnv,
+ product_overridden => true}
+ end,
+ persistent_term:put(PTKey, Info2),
+ Info2
+ end.
+
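+%% Reads a value from the `rabbit` application environment and returns it as
+%% a flat string; Default is returned when the key is unset, the value is not
+%% a (deep) character list, or it flattens to the empty string.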
+string_from_app_env(Key, Default) ->
+ case application:get_env(rabbit, Key) of
+ {ok, Val} ->
+ case io_lib:deep_char_list(Val) of
+ true ->
+ case lists:flatten(Val) of
+ "" -> Default;
+ String -> String
+ end;
+ false ->
+ Default
+ end;
+ undefined ->
+ Default
+ end.
+
+base_product_name() ->
+ %% This function assumes the `rabbit` application was loaded in
+ %% product_info().
+ {ok, Product} = application:get_key(rabbit, description),
+ Product.
+
+base_product_version() ->
+ %% This function assumes the `rabbit` application was loaded in
+ %% product_info().
+ rabbit_misc:version().
+
+motd_file() ->
+    %% Precedence is:
+ %% 1. The environment variable;
+ %% 2. The `motd_file` configuration parameter;
+ %% 3. The default value.
+ Context = rabbit_prelaunch:get_context(),
+ case Context of
+ #{motd_file := File,
+ var_origins := #{motd_file := environment}}
+ when File =/= undefined ->
+ File;
+ _ ->
+ Default = case Context of
+ #{motd_file := File} -> File;
+ _ -> undefined
+ end,
+ string_from_app_env(motd_file, Default)
+ end.
+
+motd() ->
+ case motd_file() of
+ undefined ->
+ undefined;
+ File ->
+ case file:read_file(File) of
+ {ok, MOTD} -> string:trim(MOTD, trailing, [$\r,$\n]);
+ {error, _} -> undefined
+ end
+ end.
+
+home_dir() ->
+ case init:get_argument(home) of
+ {ok, [[Home]]} -> Home;
+ Other -> Other
+ end.
+
+config_files() ->
+ rabbit_config:config_files().
+
+%% We don't want this in fhc since it references rabbit stuff. And we can't put
+%% this in the bootstep directly.
+start_fhc() ->
+ ok = rabbit_sup:start_restartable_child(
+ file_handle_cache,
+ [fun rabbit_alarm:set_alarm/1, fun rabbit_alarm:clear_alarm/1]),
+ ensure_working_fhc().
+
+ensure_working_fhc() ->
+    %% To test the file handle cache, we simply read a file we know
+ %% exists (Erlang kernel's .app file).
+ %%
+ %% To avoid any pollution of the application process' dictionary by
+ %% file_handle_cache, we spawn a separate process.
+ Parent = self(),
+ TestFun = fun() ->
+ ReadBuf = case application:get_env(rabbit, fhc_read_buffering) of
+ {ok, true} -> "ON";
+ {ok, false} -> "OFF"
+ end,
+ WriteBuf = case application:get_env(rabbit, fhc_write_buffering) of
+ {ok, true} -> "ON";
+ {ok, false} -> "OFF"
+ end,
+ rabbit_log:info("FHC read buffering: ~s~n", [ReadBuf]),
+ rabbit_log:info("FHC write buffering: ~s~n", [WriteBuf]),
+ Filename = filename:join(code:lib_dir(kernel, ebin), "kernel.app"),
+ {ok, Fd} = file_handle_cache:open(Filename, [raw, binary, read], []),
+ {ok, _} = file_handle_cache:read(Fd, 1),
+ ok = file_handle_cache:close(Fd),
+ Parent ! fhc_ok
+ end,
+ TestPid = spawn_link(TestFun),
+ %% Because we are waiting for the test fun, abuse the
+ %% 'mnesia_table_loading_retry_timeout' parameter to find a sane timeout
+ %% value.
+ Timeout = rabbit_table:retry_timeout(),
+ receive
+ fhc_ok -> ok;
+ {'EXIT', TestPid, Exception} -> throw({ensure_working_fhc, Exception})
+ after Timeout ->
+ throw({ensure_working_fhc, {timeout, TestPid}})
+ end.
diff --git a/deps/rabbit/src/rabbit_access_control.erl b/deps/rabbit/src/rabbit_access_control.erl
new file mode 100644
index 0000000000..72260d5723
--- /dev/null
+++ b/deps/rabbit/src/rabbit_access_control.erl
@@ -0,0 +1,257 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_access_control).
+
+-include("rabbit.hrl").
+
+-export([check_user_pass_login/2, check_user_login/2, check_user_loopback/2,
+ check_vhost_access/4, check_resource_access/4, check_topic_access/4]).
+
+-export([permission_cache_can_expire/1, update_state/2]).
+
+%%----------------------------------------------------------------------------
+
+-export_type([permission_atom/0]).
+
+-type permission_atom() :: 'configure' | 'read' | 'write'.
+
+%%----------------------------------------------------------------------------
+
+-spec check_user_pass_login
+ (rabbit_types:username(), rabbit_types:password()) ->
+ {'ok', rabbit_types:user()} |
+ {'refused', rabbit_types:username(), string(), [any()]}.
+
+check_user_pass_login(Username, Password) ->
+ check_user_login(Username, [{password, Password}]).
+
+-spec check_user_login
+ (rabbit_types:username(), [{atom(), any()}]) ->
+ {'ok', rabbit_types:user()} |
+ {'refused', rabbit_types:username(), string(), [any()]}.
+
+check_user_login(Username, AuthProps) ->
+ %% extra auth properties like MQTT client id are in AuthProps
+ {ok, Modules} = application:get_env(rabbit, auth_backends),
+ R = lists:foldl(
+ fun (rabbit_auth_backend_cache=ModN, {refused, _, _, _}) ->
+ %% It is possible to specify authn/authz within the cache module settings,
+ %% so we have to do both auth steps here
+ %% See this rabbitmq-users discussion:
+ %% https://groups.google.com/d/topic/rabbitmq-users/ObqM7MQdA3I/discussion
+ try_authenticate_and_try_authorize(ModN, ModN, Username, AuthProps);
+ ({ModN, ModZs}, {refused, _, _, _}) ->
+ %% Different modules for authN vs authZ. So authenticate
+ %% with authN module, then if that succeeds do
+            %% passwordless (i.e. pre-authenticated) login with authZ.
+ try_authenticate_and_try_authorize(ModN, ModZs, Username, AuthProps);
+ (Mod, {refused, _, _, _}) ->
+ %% Same module for authN and authZ. Just take the result
+ %% it gives us
+ case try_authenticate(Mod, Username, AuthProps) of
+ {ok, ModNUser = #auth_user{username = Username2, impl = Impl}} ->
+ rabbit_log:debug("User '~s' authenticated successfully by backend ~s", [Username2, Mod]),
+ user(ModNUser, {ok, [{Mod, Impl}], []});
+ Else ->
+                          rabbit_log:debug("User '~s' failed authentication by backend ~s", [Username, Mod]),
+ Else
+ end;
+ (_, {ok, User}) ->
+ %% We've successfully authenticated. Skip to the end...
+ {ok, User}
+ end,
+ {refused, Username, "No modules checked '~s'", [Username]}, Modules),
+ R.
+
+try_authenticate_and_try_authorize(ModN, ModZs0, Username, AuthProps) ->
+ ModZs = case ModZs0 of
+ A when is_atom(A) -> [A];
+ L when is_list(L) -> L
+ end,
+ case try_authenticate(ModN, Username, AuthProps) of
+ {ok, ModNUser = #auth_user{username = Username2}} ->
+ rabbit_log:debug("User '~s' authenticated successfully by backend ~s", [Username2, ModN]),
+ user(ModNUser, try_authorize(ModZs, Username2, AuthProps));
+ Else ->
+ Else
+ end.
+
+try_authenticate(Module, Username, AuthProps) ->
+ case Module:user_login_authentication(Username, AuthProps) of
+ {ok, AuthUser} -> {ok, AuthUser};
+ {error, E} -> {refused, Username,
+ "~s failed authenticating ~s: ~p~n",
+ [Module, Username, E]};
+ {refused, F, A} -> {refused, Username, F, A}
+ end.
+
+try_authorize(Modules, Username, AuthProps) ->
+ lists:foldr(
+ fun (Module, {ok, ModsImpls, ModsTags}) ->
+ case Module:user_login_authorization(Username, AuthProps) of
+ {ok, Impl, Tags}-> {ok, [{Module, Impl} | ModsImpls], ModsTags ++ Tags};
+ {ok, Impl} -> {ok, [{Module, Impl} | ModsImpls], ModsTags};
+ {error, E} -> {refused, Username,
+ "~s failed authorizing ~s: ~p~n",
+ [Module, Username, E]};
+ {refused, F, A} -> {refused, Username, F, A}
+ end;
+ (_, {refused, F, A}) ->
+ {refused, Username, F, A}
+ end, {ok, [], []}, Modules).
+
+user(#auth_user{username = Username, tags = Tags}, {ok, ModZImpls, ModZTags}) ->
+ {ok, #user{username = Username,
+ tags = Tags ++ ModZTags,
+ authz_backends = ModZImpls}};
+user(_AuthUser, Error) ->
+ Error.
+
+auth_user(#user{username = Username, tags = Tags}, Impl) ->
+ #auth_user{username = Username,
+ tags = Tags,
+ impl = Impl}.
+
+-spec check_user_loopback
+ (rabbit_types:username(), rabbit_net:socket() | inet:ip_address()) ->
+ 'ok' | 'not_allowed'.
+
+check_user_loopback(Username, SockOrAddr) ->
+ {ok, Users} = application:get_env(rabbit, loopback_users),
+ case rabbit_net:is_loopback(SockOrAddr)
+ orelse not lists:member(Username, Users) of
+ true -> ok;
+ false -> not_allowed
+ end.
+
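+%% Builds the authorization data (peer address) from the raw socket or IP
+%% handed over by the protocol layer; undefined when nothing is available.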
+get_authz_data_from({ip, Address}) ->
+ #{peeraddr => Address};
+get_authz_data_from({socket, Sock}) ->
+ {ok, {Address, _Port}} = rabbit_net:peername(Sock),
+ #{peeraddr => Address};
+get_authz_data_from(undefined) ->
+ undefined.
+
+% Note: ip can be either a tuple, or a binary if reverse_dns_lookups
+% is enabled and it's a direct connection.
+-spec check_vhost_access(User :: rabbit_types:user(),
+ VHostPath :: rabbit_types:vhost(),
+ AuthzRawData :: {socket, rabbit_net:socket()} | {ip, inet:ip_address() | binary()} | undefined,
+ AuthzContext :: map()) ->
+ 'ok' | rabbit_types:channel_exit().
+check_vhost_access(User = #user{username = Username,
+ authz_backends = Modules}, VHostPath, AuthzRawData, AuthzContext) ->
+ AuthzData = get_authz_data_from(AuthzRawData),
+ FullAuthzContext = create_vhost_access_authz_data(AuthzData, AuthzContext),
+ lists:foldl(
+ fun({Mod, Impl}, ok) ->
+ check_access(
+ fun() ->
+ rabbit_vhost:exists(VHostPath) andalso
+ Mod:check_vhost_access(
+ auth_user(User, Impl), VHostPath, FullAuthzContext)
+ end,
+ Mod, "access to vhost '~s' refused for user '~s'",
+ [VHostPath, Username], not_allowed);
+ (_, Else) ->
+ Else
+ end, ok, Modules).
+
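+%% Combines the peer-address map with the per-protocol authz context,
+%% keeping whichever side is non-empty (or undefined when both are empty).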
+create_vhost_access_authz_data(undefined, Context) when map_size(Context) == 0 ->
+ undefined;
+create_vhost_access_authz_data(undefined, Context) ->
+ Context;
+create_vhost_access_authz_data(PeerAddr, Context) when map_size(Context) == 0 ->
+ PeerAddr;
+create_vhost_access_authz_data(PeerAddr, Context) ->
+ maps:merge(PeerAddr, Context).
+
+-spec check_resource_access
+ (rabbit_types:user(), rabbit_types:r(atom()), permission_atom(), rabbit_types:authz_context()) ->
+ 'ok' | rabbit_types:channel_exit().
+
+check_resource_access(User, R = #resource{kind = exchange, name = <<"">>},
+ Permission, Context) ->
+ check_resource_access(User, R#resource{name = <<"amq.default">>},
+ Permission, Context);
+check_resource_access(User = #user{username = Username,
+ authz_backends = Modules},
+ Resource, Permission, Context) ->
+ lists:foldl(
+ fun({Module, Impl}, ok) ->
+ check_access(
+ fun() -> Module:check_resource_access(
+ auth_user(User, Impl), Resource, Permission, Context) end,
+ Module, "access to ~s refused for user '~s'",
+ [rabbit_misc:rs(Resource), Username]);
+ (_, Else) -> Else
+ end, ok, Modules).
+
+check_topic_access(User = #user{username = Username,
+ authz_backends = Modules},
+ Resource, Permission, Context) ->
+ lists:foldl(
+ fun({Module, Impl}, ok) ->
+ check_access(
+ fun() -> Module:check_topic_access(
+ auth_user(User, Impl), Resource, Permission, Context) end,
+ Module, "access to topic '~s' in exchange ~s refused for user '~s'",
+ [maps:get(routing_key, Context), rabbit_misc:rs(Resource), Username]);
+ (_, Else) -> Else
+ end, ok, Modules).
+
+check_access(Fun, Module, ErrStr, ErrArgs) ->
+ check_access(Fun, Module, ErrStr, ErrArgs, access_refused).
+
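+%% Runs the backend check and maps a false or {error, _} result onto a
+%% protocol error (ErrName) built from ErrStr/ErrArgs; backend errors are
+%% also logged with the backend module name appended.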
+check_access(Fun, Module, ErrStr, ErrArgs, ErrName) ->
+ case Fun() of
+ true ->
+ ok;
+ false ->
+ rabbit_misc:protocol_error(ErrName, ErrStr, ErrArgs);
+ {error, E} ->
+ FullErrStr = ErrStr ++ ", backend ~s returned an error: ~p~n",
+ FullErrArgs = ErrArgs ++ [Module, E],
+ rabbit_log:error(FullErrStr, FullErrArgs),
+ rabbit_misc:protocol_error(ErrName, FullErrStr, FullErrArgs)
+ end.
+
+-spec update_state(User :: rabbit_types:user(), NewState :: term()) ->
+ {'ok', rabbit_types:auth_user()} |
+ {'refused', string()} |
+ {'error', any()}.
+
+update_state(User = #user{authz_backends = Backends0}, NewState) ->
+ %% N.B.: we use foldl/3 and prepending, so the final list of
+ %% backends is in reverse order from the original list.
+ Backends = lists:foldl(
+ fun({Module, Impl}, {ok, Acc}) ->
+ case Module:state_can_expire() of
+ true ->
+ case Module:update_state(auth_user(User, Impl), NewState) of
+ {ok, #auth_user{impl = Impl1}} ->
+ {ok, [{Module, Impl1} | Acc]};
+ Else -> Else
+ end;
+ false ->
+ {ok, [{Module, Impl} | Acc]}
+ end;
+ (_, {error, _} = Err) -> Err;
+ (_, {refused, _, _} = Err) -> Err
+ end, {ok, []}, Backends0),
+ case Backends of
+ {ok, Pairs} -> {ok, User#user{authz_backends = lists:reverse(Pairs)}};
+ Else -> Else
+ end.
+
+-spec permission_cache_can_expire(User :: rabbit_types:user()) -> boolean().
+
+%% Returns true if any of the backends support credential expiration,
+%% otherwise returns false.
+permission_cache_can_expire(#user{authz_backends = Backends}) ->
+ lists:any(fun ({Module, _State}) -> Module:state_can_expire() end, Backends).
diff --git a/deps/rabbit/src/rabbit_alarm.erl b/deps/rabbit/src/rabbit_alarm.erl
new file mode 100644
index 0000000000..3f1ab7ae62
--- /dev/null
+++ b/deps/rabbit/src/rabbit_alarm.erl
@@ -0,0 +1,365 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+%% There are two types of alarms handled by this module:
+%%
+%% * per-node resource (disk, memory) alarms for the whole cluster. If any node
+%% has an alarm, then all publishing should be disabled across the
+%% cluster until all alarms clear. When a node sets such an alarm,
+%% this information is automatically propagated throughout the cluster.
+%%   `#alarms.alarmed_nodes' is used to track this type of alarm.
+%% * limits local to this node (file_descriptor_limit). Used for information
+%% purposes only: logging and getting node status. This information is not propagated
+%%   throughout the cluster. `#alarms.alarms' is used to track this type of alarm.
+%% @end
+
+-module(rabbit_alarm).
+
+-behaviour(gen_event).
+
+-export([start_link/0, start/0, stop/0, register/2, set_alarm/1,
+ clear_alarm/1, get_alarms/0, get_alarms/1, get_local_alarms/0, get_local_alarms/1, on_node_up/1, on_node_down/1,
+ format_as_map/1, format_as_maps/1, is_local/1]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([remote_conserve_resources/3]). %% Internal use only
+
+-define(SERVER, ?MODULE).
+
+-define(FILE_DESCRIPTOR_RESOURCE, <<"file descriptors">>).
+-define(MEMORY_RESOURCE, <<"memory">>).
+-define(DISK_SPACE_RESOURCE, <<"disk space">>).
+
+%%----------------------------------------------------------------------------
+
+-record(alarms, {alertees :: dict:dict(pid(), rabbit_types:mfargs()),
+ alarmed_nodes :: dict:dict(node(), [resource_alarm_source()]),
+ alarms :: [alarm()]}).
+
+-type local_alarm() :: 'file_descriptor_limit'.
+-type resource_alarm_source() :: 'disk' | 'memory'.
+-type resource_alarm() :: {resource_limit, resource_alarm_source(), node()}.
+-type alarm() :: local_alarm() | resource_alarm().
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_event:start_link({local, ?SERVER}).
+
+-spec start() -> 'ok'.
+
+start() ->
+ ok = rabbit_sup:start_restartable_child(?MODULE),
+ ok = gen_event:add_handler(?SERVER, ?MODULE, []),
+ {ok, MemoryWatermark} = application:get_env(vm_memory_high_watermark),
+
+ rabbit_sup:start_restartable_child(
+ vm_memory_monitor, [MemoryWatermark,
+ fun (Alarm) ->
+ background_gc:run(),
+ set_alarm(Alarm)
+ end,
+ fun clear_alarm/1]),
+ {ok, DiskLimit} = application:get_env(disk_free_limit),
+ rabbit_sup:start_delayed_restartable_child(
+ rabbit_disk_monitor, [DiskLimit]),
+ ok.
+
+-spec stop() -> 'ok'.
+
+stop() -> ok.
+
+%% Registers a handler that should be called on every resource alarm change.
+%% Given a call rabbit_alarm:register(Pid, {M, F, A}), the handler would be
+%% called like this: `apply(M, F, A ++ [Pid, Source, Alert])', where `Source'
+%% has the type of resource_alarm_source() and `Alert' has the type of resource_alert().
+
+-spec register(pid(), rabbit_types:mfargs()) -> [atom()].
+
+register(Pid, AlertMFA) ->
+ gen_event:call(?SERVER, ?MODULE, {register, Pid, AlertMFA}, infinity).
+
+-spec set_alarm({alarm(), []}) -> 'ok'.
+
+set_alarm(Alarm) -> gen_event:notify(?SERVER, {set_alarm, Alarm}).
+
+-spec clear_alarm(alarm()) -> 'ok'.
+
+clear_alarm(Alarm) -> gen_event:notify(?SERVER, {clear_alarm, Alarm}).
+
+-spec get_alarms() -> [{alarm(), []}].
+get_alarms() -> gen_event:call(?SERVER, ?MODULE, get_alarms, infinity).
+
+-spec get_alarms(timeout()) -> [{alarm(), []}].
+get_alarms(Timeout) -> gen_event:call(?SERVER, ?MODULE, get_alarms, Timeout).
+
+-spec get_local_alarms() -> [alarm()].
+get_local_alarms() -> gen_event:call(?SERVER, ?MODULE, get_local_alarms, infinity).
+
+-spec get_local_alarms(timeout()) -> [alarm()].
+get_local_alarms(Timeout) -> gen_event:call(?SERVER, ?MODULE, get_local_alarms, Timeout).
+
+-spec filter_local_alarms([alarm()]) -> [alarm()].
+filter_local_alarms(Alarms) ->
+ lists:filter(fun is_local/1, Alarms).
+
+-spec is_local({alarm(), any()}) -> boolean().
+is_local({file_descriptor_limit, _}) -> true;
+is_local({{resource_limit, _Resource, Node}, _}) when Node =:= node() -> true;
+is_local({{resource_limit, _Resource, Node}, _}) when Node =/= node() -> false.
+
+-spec format_as_map(alarm()) -> #{binary() => term()}.
+format_as_map(file_descriptor_limit) ->
+ #{
+ <<"resource">> => ?FILE_DESCRIPTOR_RESOURCE,
+ <<"node">> => node()
+ };
+format_as_map({resource_limit, disk, Node}) ->
+ #{
+ <<"resource">> => ?DISK_SPACE_RESOURCE,
+ <<"node">> => Node
+ };
+format_as_map({resource_limit, memory, Node}) ->
+ #{
+ <<"resource">> => ?MEMORY_RESOURCE,
+ <<"node">> => Node
+ };
+format_as_map({resource_limit, Limit, Node}) ->
+ #{
+ <<"resource">> => rabbit_data_coercion:to_binary(Limit),
+ <<"node">> => Node
+ }.
+
+-spec format_as_maps([{alarm(), []}]) -> [#{any() => term()}].
+format_as_maps(Alarms) when is_list(Alarms) ->
+ %% get_alarms/0 returns
+ %%
+ %% [
+ %% {file_descriptor_limit, []},
+ %% {{resource_limit, disk, rabbit@warp10}, []},
+ %% {{resource_limit, memory, rabbit@warp10}, []}
+ %% ]
+ lists:map(fun({Resource, _}) -> format_as_map(Resource);
+ (Resource) -> format_as_map(Resource)
+ end, Alarms).
+
+
+-spec on_node_up(node()) -> 'ok'.
+on_node_up(Node) -> gen_event:notify(?SERVER, {node_up, Node}).
+
+-spec on_node_down(node()) -> 'ok'.
+on_node_down(Node) -> gen_event:notify(?SERVER, {node_down, Node}).
+
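+%% Invoked on the node where a resource alarm changed (registered remotely
+%% via the node_up handler below): forwards the corresponding set_alarm or
+%% clear_alarm event to the alarm server on node(Pid), tagged with node().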
+remote_conserve_resources(Pid, Source, {true, _, _}) ->
+ gen_event:notify({?SERVER, node(Pid)},
+ {set_alarm, {{resource_limit, Source, node()}, []}});
+remote_conserve_resources(Pid, Source, {false, _, _}) ->
+ gen_event:notify({?SERVER, node(Pid)},
+ {clear_alarm, {resource_limit, Source, node()}}).
+
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, #alarms{alertees = dict:new(),
+ alarmed_nodes = dict:new(),
+ alarms = []}}.
+
+handle_call({register, Pid, AlertMFA}, State = #alarms{alarmed_nodes = AN}) ->
+ {ok, lists:usort(lists:append([V || {_, V} <- dict:to_list(AN)])),
+ internal_register(Pid, AlertMFA, State)};
+
+handle_call(get_alarms, State) ->
+ {ok, compute_alarms(State), State};
+
+handle_call(get_local_alarms, State) ->
+ {ok, filter_local_alarms(compute_alarms(State)), State};
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+handle_event({set_alarm, {{resource_limit, Source, Node}, []}}, State) ->
+ case is_node_alarmed(Source, Node, State) of
+ true ->
+ {ok, State};
+ false ->
+ rabbit_event:notify(alarm_set, [{source, Source},
+ {node, Node}]),
+ handle_set_resource_alarm(Source, Node, State)
+ end;
+handle_event({set_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
+ case lists:member(Alarm, Alarms) of
+ true -> {ok, State};
+ false -> UpdatedAlarms = lists:usort([Alarm|Alarms]),
+ handle_set_alarm(Alarm, State#alarms{alarms = UpdatedAlarms})
+ end;
+
+handle_event({clear_alarm, {resource_limit, Source, Node}}, State) ->
+ case is_node_alarmed(Source, Node, State) of
+ true ->
+ rabbit_event:notify(alarm_cleared, [{source, Source},
+ {node, Node}]),
+ handle_clear_resource_alarm(Source, Node, State);
+ false ->
+ {ok, State}
+ end;
+handle_event({clear_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
+ case lists:keymember(Alarm, 1, Alarms) of
+ true -> handle_clear_alarm(
+ Alarm, State#alarms{alarms = lists:keydelete(
+ Alarm, 1, Alarms)});
+ false -> {ok, State}
+
+ end;
+
+handle_event({node_up, Node}, State) ->
+ %% Must do this via notify and not call to avoid possible deadlock.
+ ok = gen_event:notify(
+ {?SERVER, Node},
+ {register, self(), {?MODULE, remote_conserve_resources, []}}),
+ {ok, State};
+
+handle_event({node_down, Node}, #alarms{alarmed_nodes = AN} = State) ->
+ AlarmsForDeadNode = case dict:find(Node, AN) of
+ {ok, V} -> V;
+ error -> []
+ end,
+ {ok, lists:foldr(fun(Source, AccState) ->
+ rabbit_log:warning("~s resource limit alarm cleared for dead node ~p~n",
+ [Source, Node]),
+ maybe_alert(fun dict_unappend/3, Node, Source, false, AccState)
+ end, State, AlarmsForDeadNode)};
+
+handle_event({register, Pid, AlertMFA}, State) ->
+ {ok, internal_register(Pid, AlertMFA, State)};
+
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_info({'DOWN', _MRef, process, Pid, _Reason},
+ State = #alarms{alertees = Alertees}) ->
+ {ok, State#alarms{alertees = dict:erase(Pid, Alertees)}};
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
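+%% dict_append/3 and dict_unappend/3 maintain the per-node list of alarm
+%% sources in #alarms.alarmed_nodes, keeping each list sorted and
+%% de-duplicated and dropping a node's entry once its last source is removed.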
+dict_append(Key, Val, Dict) ->
+ L = case dict:find(Key, Dict) of
+ {ok, V} -> V;
+ error -> []
+ end,
+ dict:store(Key, lists:usort([Val|L]), Dict).
+
+dict_unappend(Key, Val, Dict) ->
+ L = case dict:find(Key, Dict) of
+ {ok, V} -> V;
+ error -> []
+ end,
+
+ case lists:delete(Val, L) of
+ [] -> dict:erase(Key, Dict);
+ X -> dict:store(Key, X, Dict)
+ end.
+
+maybe_alert(UpdateFun, Node, Source, WasAlertAdded,
+ State = #alarms{alarmed_nodes = AN,
+ alertees = Alertees}) ->
+ AN1 = UpdateFun(Node, Source, AN),
+ %% Is alarm for Source still set on any node?
+ StillHasAlerts = lists:any(fun ({_Node, NodeAlerts}) -> lists:member(Source, NodeAlerts) end, dict:to_list(AN1)),
+ case StillHasAlerts of
+ true -> ok;
+ false -> rabbit_log:warning("~s resource limit alarm cleared across the cluster~n", [Source])
+ end,
+ Alert = {WasAlertAdded, StillHasAlerts, Node},
+ case node() of
+ Node -> ok = alert_remote(Alert, Alertees, Source);
+ _ -> ok
+ end,
+ ok = alert_local(Alert, Alertees, Source),
+ State#alarms{alarmed_nodes = AN1}.
+
+alert_local(Alert, Alertees, Source) ->
+ alert(Alertees, Source, Alert, fun erlang:'=:='/2).
+
+alert_remote(Alert, Alertees, Source) ->
+ alert(Alertees, Source, Alert, fun erlang:'=/='/2).
+
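+%% Applies every registered {M, F, A} callback whose owner pid lives on a
+%% node selected by NodeComparator (the local node for alert_local/3, all
+%% other nodes for alert_remote/3), appending [Pid, Source, Alert] to A.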
+alert(Alertees, Source, Alert, NodeComparator) ->
+ Node = node(),
+ dict:fold(fun (Pid, {M, F, A}, ok) ->
+ case NodeComparator(Node, node(Pid)) of
+ true -> apply(M, F, A ++ [Pid, Source, Alert]);
+ false -> ok
+ end
+ end, ok, Alertees).
+
+internal_register(Pid, {M, F, A} = AlertMFA,
+ State = #alarms{alertees = Alertees}) ->
+ _MRef = erlang:monitor(process, Pid),
+ case dict:find(node(), State#alarms.alarmed_nodes) of
+ {ok, Sources} -> [apply(M, F, A ++ [Pid, R, {true, true, node()}]) || R <- Sources];
+ error -> ok
+ end,
+ NewAlertees = dict:store(Pid, AlertMFA, Alertees),
+ State#alarms{alertees = NewAlertees}.
+
+handle_set_resource_alarm(Source, Node, State) ->
+ rabbit_log:warning(
+ "~s resource limit alarm set on node ~p.~n~n"
+ "**********************************************************~n"
+ "*** Publishers will be blocked until this alarm clears ***~n"
+ "**********************************************************~n",
+ [Source, Node]),
+ {ok, maybe_alert(fun dict_append/3, Node, Source, true, State)}.
+
+handle_set_alarm({file_descriptor_limit, []}, State) ->
+ rabbit_log:warning(
+ "file descriptor limit alarm set.~n~n"
+ "********************************************************************~n"
+ "*** New connections will not be accepted until this alarm clears ***~n"
+ "********************************************************************~n"),
+ {ok, State};
+handle_set_alarm(Alarm, State) ->
+ rabbit_log:warning("alarm '~p' set~n", [Alarm]),
+ {ok, State}.
+
+handle_clear_resource_alarm(Source, Node, State) ->
+ rabbit_log:warning("~s resource limit alarm cleared on node ~p~n",
+ [Source, Node]),
+ {ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)}.
+
+handle_clear_alarm(file_descriptor_limit, State) ->
+ rabbit_log:warning("file descriptor limit alarm cleared~n"),
+ {ok, State};
+handle_clear_alarm(Alarm, State) ->
+ rabbit_log:warning("alarm '~p' cleared~n", [Alarm]),
+ {ok, State}.
+
+is_node_alarmed(Source, Node, #alarms{alarmed_nodes = AN}) ->
+ case dict:find(Node, AN) of
+ {ok, Sources} ->
+ lists:member(Source, Sources);
+ error ->
+ false
+ end.
+
+compute_alarms(#alarms{alarms = Alarms,
+ alarmed_nodes = AN}) ->
+ Alarms ++ [ {{resource_limit, Source, Node}, []}
+ || {Node, Sources} <- dict:to_list(AN), Source <- Sources ].
diff --git a/deps/rabbit/src/rabbit_amqqueue.erl b/deps/rabbit/src/rabbit_amqqueue.erl
new file mode 100644
index 0000000000..cd5f894680
--- /dev/null
+++ b/deps/rabbit/src/rabbit_amqqueue.erl
@@ -0,0 +1,1889 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqqueue).
+
+-export([warn_file_limit/0]).
+-export([recover/1, stop/1, start/1, declare/6, declare/7,
+ delete_immediately/1, delete_exclusive/2, delete/4, purge/1,
+ forget_all_durable/1]).
+-export([pseudo_queue/2, pseudo_queue/3, immutable/1]).
+-export([lookup/1, lookup_many/1, not_found_or_absent/1, not_found_or_absent_dirty/1,
+ with/2, with/3, with_or_die/2,
+ assert_equivalence/5,
+ check_exclusive_access/2, with_exclusive_access_or_die/3,
+ stat/1, deliver/2,
+ requeue/3, ack/3, reject/4]).
+-export([not_found/1, absent/2]).
+-export([list/0, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2,
+ emit_info_all/5, list_local/1, info_local/1,
+ emit_info_local/4, emit_info_down/4]).
+-export([count/0]).
+-export([list_down/1, count/1, list_names/0, list_names/1, list_local_names/0,
+ list_local_names_down/0, list_with_possible_retry/1]).
+-export([list_by_type/1, sample_local_queues/0, sample_n_by_name/2, sample_n/2]).
+-export([force_event_refresh/1, notify_policy_changed/1]).
+-export([consumers/1, consumers_all/1, emit_consumers_all/4, consumer_info_keys/0]).
+-export([basic_get/5, basic_consume/12, basic_cancel/5, notify_decorators/1]).
+-export([notify_sent/2, notify_sent_queue_down/1, resume/2]).
+-export([notify_down_all/2, notify_down_all/3, activate_limit_all/2, credit/5]).
+-export([on_node_up/1, on_node_down/1]).
+-export([update/2, store_queue/1, update_decorators/1, policy_changed/2]).
+-export([update_mirroring/1, sync_mirrors/1, cancel_sync_mirrors/1]).
+-export([emit_unresponsive/6, emit_unresponsive_local/5, is_unresponsive/2]).
+-export([has_synchronised_mirrors_online/1]).
+-export([is_replicated/1, is_exclusive/1, is_not_exclusive/1, is_dead_exclusive/1]).
+-export([list_local_quorum_queues/0, list_local_quorum_queue_names/0,
+ list_local_mirrored_classic_queues/0, list_local_mirrored_classic_names/0,
+ list_local_leaders/0, list_local_followers/0, get_quorum_nodes/1,
+ list_local_mirrored_classic_without_synchronised_mirrors/0,
+ list_local_mirrored_classic_without_synchronised_mirrors_for_cli/0]).
+-export([ensure_rabbit_queue_record_is_initialized/1]).
+-export([format/1]).
+-export([delete_immediately_by_resource/1]).
+-export([delete_crashed/1,
+ delete_crashed/2,
+ delete_crashed_internal/2]).
+
+-export([pid_of/1, pid_of/2]).
+-export([mark_local_durable_queues_stopped/1]).
+
+-export([rebalance/3]).
+-export([collect_info_all/2]).
+
+-export([is_policy_applicable/2]).
+-export([is_server_named_allowed/1]).
+
+-export([check_max_age/1]).
+-export([get_queue_type/1]).
+
+%% internal
+-export([internal_declare/2, internal_delete/2, run_backing_queue/3,
+ set_ram_duration_target/2, set_maximum_since_use/2,
+ emit_consumers_local/3, internal_delete/3]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("stdlib/include/qlc.hrl").
+-include("amqqueue.hrl").
+
+-define(INTEGER_ARG_TYPES, [byte, short, signedint, long,
+ unsignedbyte, unsignedshort, unsignedint]).
+
+-define(MORE_CONSUMER_CREDIT_AFTER, 50).
+
+-define(IS_CLASSIC(QPid), is_pid(QPid)).
+-define(IS_QUORUM(QPid), is_tuple(QPid)).
+%%----------------------------------------------------------------------------
+
+-export_type([name/0, qmsg/0, absent_reason/0]).
+
+-type name() :: rabbit_types:r('queue').
+
+-type qpids() :: [pid()].
+-type qlen() :: rabbit_types:ok(non_neg_integer()).
+-type qfun(A) :: fun ((amqqueue:amqqueue()) -> A | no_return()).
+-type qmsg() :: {name(), pid() | {atom(), pid()}, msg_id(),
+ boolean(), rabbit_types:message()}.
+-type msg_id() :: non_neg_integer().
+-type ok_or_errors() ::
+ 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}.
+-type absent_reason() :: 'nodedown' | 'crashed' | stopped | timeout.
+-type queue_not_found() :: not_found.
+-type queue_absent() :: {'absent', amqqueue:amqqueue(), absent_reason()}.
+-type not_found_or_absent() :: queue_not_found() | queue_absent().
+
+%%----------------------------------------------------------------------------
+
+-define(CONSUMER_INFO_KEYS,
+ [queue_name, channel_pid, consumer_tag, ack_required, prefetch_count,
+ active, activity_status, arguments]).
+
+warn_file_limit() ->
+ DurableQueues = find_recoverable_queues(),
+ L = length(DurableQueues),
+
+ %% if there are not enough file handles, the server might hang
+    %% when trying to recover queues, so warn the user:
+ case file_handle_cache:get_limit() < L of
+ true ->
+ rabbit_log:warning(
+ "Recovering ~p queues, available file handles: ~p. Please increase max open file handles limit to at least ~p!~n",
+ [L, file_handle_cache:get_limit(), L]);
+ false ->
+ ok
+ end.
+
+-spec recover(rabbit_types:vhost()) ->
+ {Recovered :: [amqqueue:amqqueue()],
+ Failed :: [amqqueue:amqqueue()]}.
+recover(VHost) ->
+ AllDurable = find_local_durable_queues(VHost),
+ rabbit_queue_type:recover(VHost, AllDurable).
+
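+%% Classic queue pids are plain pids, quorum queue "pids" are {Name, Node}
+%% tuples; see the ?IS_CLASSIC/?IS_QUORUM macros above.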
+filter_pid_per_type(QPids) ->
+ lists:partition(fun(QPid) -> ?IS_CLASSIC(QPid) end, QPids).
+
+filter_resource_per_type(Resources) ->
+ Queues = [begin
+ {ok, Q} = lookup(Resource),
+ QPid = amqqueue:get_pid(Q),
+ {Resource, QPid}
+ end || Resource <- Resources],
+ lists:partition(fun({_Resource, QPid}) -> ?IS_CLASSIC(QPid) end, Queues).
+
+-spec stop(rabbit_types:vhost()) -> 'ok'.
+stop(VHost) ->
+ %% Classic queues
+ ok = rabbit_amqqueue_sup_sup:stop_for_vhost(VHost),
+ {ok, BQ} = application:get_env(rabbit, backing_queue_module),
+ ok = BQ:stop(VHost),
+ rabbit_quorum_queue:stop(VHost).
+
+-spec start([amqqueue:amqqueue()]) -> 'ok'.
+
+start(Qs) ->
+ %% At this point all recovered queues and their bindings are
+ %% visible to routing, so now it is safe for them to complete
+ %% their initialisation (which may involve interacting with other
+ %% queues).
+ _ = [amqqueue:get_pid(Q) ! {self(), go}
+ || Q <- Qs,
+ %% All queues are supposed to be classic here.
+ amqqueue:is_classic(Q)],
+ ok.
+
+mark_local_durable_queues_stopped(VHost) ->
+ ?try_mnesia_tx_or_upgrade_amqqueue_and_retry(
+ do_mark_local_durable_queues_stopped(VHost),
+ do_mark_local_durable_queues_stopped(VHost)).
+
+do_mark_local_durable_queues_stopped(VHost) ->
+ Qs = find_local_durable_queues(VHost),
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ [ store_queue(amqqueue:set_state(Q, stopped))
+ || Q <- Qs, amqqueue:get_type(Q) =:= rabbit_classic_queue,
+ amqqueue:get_state(Q) =/= stopped ]
+ end).
+
+find_local_durable_queues(VHost) ->
+ mnesia:async_dirty(
+ fun () ->
+ qlc:e(
+ qlc:q(
+ [Q || Q <- mnesia:table(rabbit_durable_queue),
+ amqqueue:get_vhost(Q) =:= VHost andalso
+ rabbit_queue_type:is_recoverable(Q)
+ ]))
+ end).
+
+find_recoverable_queues() ->
+ mnesia:async_dirty(
+ fun () ->
+ qlc:e(qlc:q([Q || Q <- mnesia:table(rabbit_durable_queue),
+ rabbit_queue_type:is_recoverable(Q)]))
+ end).
+
+-spec declare(name(),
+ boolean(),
+ boolean(),
+ rabbit_framing:amqp_table(),
+ rabbit_types:maybe(pid()),
+ rabbit_types:username()) ->
+ {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} |
+ {'new', amqqueue:amqqueue(), rabbit_fifo_client:state()} |
+ {'absent', amqqueue:amqqueue(), absent_reason()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+declare(QueueName, Durable, AutoDelete, Args, Owner, ActingUser) ->
+ declare(QueueName, Durable, AutoDelete, Args, Owner, ActingUser, node()).
+
+
+%% The Node argument suggests where the queue (master if mirrored)
+%% should be. Note that in some cases (e.g. with "nodes" policy in
+%% effect) this might not be possible to satisfy.
+
+-spec declare(name(),
+ boolean(),
+ boolean(),
+ rabbit_framing:amqp_table(),
+ rabbit_types:maybe(pid()),
+ rabbit_types:username(),
+ node()) ->
+ {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} |
+ {'absent', amqqueue:amqqueue(), absent_reason()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+declare(QueueName = #resource{virtual_host = VHost}, Durable, AutoDelete, Args,
+ Owner, ActingUser, Node) ->
+ ok = check_declare_arguments(QueueName, Args),
+ Type = get_queue_type(Args),
+ case rabbit_queue_type:is_enabled(Type) of
+ true ->
+ Q0 = amqqueue:new(QueueName,
+ none,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ #{user => ActingUser},
+ Type),
+ Q = rabbit_queue_decorator:set(
+ rabbit_policy:set(Q0)),
+ rabbit_queue_type:declare(Q, Node);
+ false ->
+ {protocol_error, internal_error,
+ "Cannot declare a queue '~s' of type '~s' on node '~s': "
+ "the corresponding feature flag is disabled",
+ [rabbit_misc:rs(QueueName), Type, Node]}
+ end.
+
+get_queue_type(Args) ->
+ case rabbit_misc:table_lookup(Args, <<"x-queue-type">>) of
+ undefined ->
+ rabbit_queue_type:default();
+ {_, V} ->
+ rabbit_queue_type:discover(V)
+ end.
+
+-spec internal_declare(amqqueue:amqqueue(), boolean()) ->
+ {created | existing, amqqueue:amqqueue()} | queue_absent().
+
+internal_declare(Q, Recover) ->
+ ?try_mnesia_tx_or_upgrade_amqqueue_and_retry(
+ do_internal_declare(Q, Recover),
+ begin
+ Q1 = amqqueue:upgrade(Q),
+ do_internal_declare(Q1, Recover)
+ end).
+
+do_internal_declare(Q, true) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () ->
+ ok = store_queue(amqqueue:set_state(Q, live)),
+ rabbit_misc:const({created, Q})
+ end);
+do_internal_declare(Q, false) ->
+ QueueName = amqqueue:get_name(Q),
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () ->
+ case mnesia:wread({rabbit_queue, QueueName}) of
+ [] ->
+ case not_found_or_absent(QueueName) of
+ not_found -> Q1 = rabbit_policy:set(Q),
+ Q2 = amqqueue:set_state(Q1, live),
+ ok = store_queue(Q2),
+ fun () -> {created, Q2} end;
+ {absent, _Q, _} = R -> rabbit_misc:const(R)
+ end;
+ [ExistingQ] ->
+ rabbit_misc:const({existing, ExistingQ})
+ end
+ end).
+
+-spec update
+ (name(), fun((amqqueue:amqqueue()) -> amqqueue:amqqueue())) ->
+ 'not_found' | amqqueue:amqqueue().
+
+update(Name, Fun) ->
+ case mnesia:wread({rabbit_queue, Name}) of
+ [Q] ->
+ Durable = amqqueue:is_durable(Q),
+ Q1 = Fun(Q),
+ ok = mnesia:write(rabbit_queue, Q1, write),
+ case Durable of
+ true -> ok = mnesia:write(rabbit_durable_queue, Q1, write);
+ _ -> ok
+ end,
+ Q1;
+ [] ->
+ not_found
+ end.
+
+%% only really used for quorum queues to ensure the rabbit_queue record
+%% is initialised
+ensure_rabbit_queue_record_is_initialized(Q) ->
+ ?try_mnesia_tx_or_upgrade_amqqueue_and_retry(
+ do_ensure_rabbit_queue_record_is_initialized(Q),
+ begin
+ Q1 = amqqueue:upgrade(Q),
+ do_ensure_rabbit_queue_record_is_initialized(Q1)
+ end).
+
+do_ensure_rabbit_queue_record_is_initialized(Q) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () ->
+ ok = store_queue(Q),
+ rabbit_misc:const({ok, Q})
+ end).
+
+-spec store_queue(amqqueue:amqqueue()) -> 'ok'.
+
+store_queue(Q) when ?amqqueue_is_durable(Q) ->
+ Q1 = amqqueue:reset_mirroring_and_decorators(Q),
+ ok = mnesia:write(rabbit_durable_queue, Q1, write),
+ store_queue_ram(Q);
+store_queue(Q) when not ?amqqueue_is_durable(Q) ->
+ store_queue_ram(Q).
+
+store_queue_ram(Q) ->
+ ok = mnesia:write(rabbit_queue, rabbit_queue_decorator:set(Q), write).
+
+-spec update_decorators(name()) -> 'ok'.
+
+update_decorators(Name) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ case mnesia:wread({rabbit_queue, Name}) of
+ [Q] -> store_queue_ram(Q),
+ ok;
+ [] -> ok
+ end
+ end).
+
+-spec policy_changed(amqqueue:amqqueue(), amqqueue:amqqueue()) ->
+ 'ok'.
+
+policy_changed(Q1, Q2) ->
+ Decorators1 = amqqueue:get_decorators(Q1),
+ Decorators2 = amqqueue:get_decorators(Q2),
+ rabbit_mirror_queue_misc:update_mirrors(Q1, Q2),
+ D1 = rabbit_queue_decorator:select(Decorators1),
+ D2 = rabbit_queue_decorator:select(Decorators2),
+ [ok = M:policy_changed(Q1, Q2) || M <- lists:usort(D1 ++ D2)],
+ %% Make sure we emit a stats event even if nothing
+ %% mirroring-related has changed - the policy may have changed anyway.
+ notify_policy_changed(Q2).
+
+is_policy_applicable(QName, Policy) ->
+ case lookup(QName) of
+ {ok, Q} ->
+ rabbit_queue_type:is_policy_applicable(Q, Policy);
+ _ ->
+ %% Defaults to previous behaviour. Apply always
+ true
+ end.
+
+is_server_named_allowed(Args) ->
+ Type = get_queue_type(Args),
+ rabbit_queue_type:is_server_named_allowed(Type).
+
+-spec lookup
+ (name()) ->
+ rabbit_types:ok(amqqueue:amqqueue()) |
+ rabbit_types:error('not_found');
+ ([name()]) ->
+ [amqqueue:amqqueue()].
+
+lookup([]) -> []; %% optimisation
+lookup([Name]) -> ets:lookup(rabbit_queue, Name); %% optimisation
+lookup(Names) when is_list(Names) ->
+ %% Normally we'd call mnesia:dirty_read/1 here, but that is quite
+ %% expensive for reasons explained in rabbit_misc:dirty_read/1.
+ lists:append([ets:lookup(rabbit_queue, Name) || Name <- Names]);
+lookup(Name) ->
+ rabbit_misc:dirty_read({rabbit_queue, Name}).
+
+-spec lookup_many ([name()]) -> [amqqueue:amqqueue()].
+
+lookup_many(Names) when is_list(Names) ->
+ lookup(Names).
+
+-spec not_found_or_absent(name()) -> not_found_or_absent().
+
+not_found_or_absent(Name) ->
+ %% NB: we assume that the caller has already performed a lookup on
+ %% rabbit_queue and not found anything
+ case mnesia:read({rabbit_durable_queue, Name}) of
+ [] -> not_found;
+ [Q] -> {absent, Q, nodedown} %% Q exists on stopped node
+ end.
+
+-spec not_found_or_absent_dirty(name()) -> not_found_or_absent().
+
+not_found_or_absent_dirty(Name) ->
+ %% We should read from both tables inside a tx, to get a
+ %% consistent view. But the chances of an inconsistency are small,
+ %% and only affect the error kind.
+ case rabbit_misc:dirty_read({rabbit_durable_queue, Name}) of
+ {error, not_found} -> not_found;
+ {ok, Q} -> {absent, Q, nodedown}
+ end.
+
+-spec get_rebalance_lock(pid()) ->
+ {true, {rebalance_queues, pid()}} | false.
+get_rebalance_lock(Pid) when is_pid(Pid) ->
+ Id = {rebalance_queues, Pid},
+ Nodes = [node()|nodes()],
+ %% Note that we're not re-trying. We want to immediately know
+ %% if a re-balance is taking place and stop accordingly.
+ case global:set_lock(Id, Nodes, 0) of
+ true ->
+ {true, Id};
+ false ->
+ false
+ end.
+
+-spec rebalance('all' | 'quorum' | 'classic', binary(), binary()) ->
+ {ok, [{node(), pos_integer()}]} | {error, term()}.
+rebalance(Type, VhostSpec, QueueSpec) ->
+ %% We have not yet acquired the rebalance_queues global lock.
+ maybe_rebalance(get_rebalance_lock(self()), Type, VhostSpec, QueueSpec).
+
+maybe_rebalance({true, Id}, Type, VhostSpec, QueueSpec) ->
+ rabbit_log:info("Starting queue rebalance operation: '~s' for vhosts matching '~s' and queues matching '~s'",
+ [Type, VhostSpec, QueueSpec]),
+ Running = rabbit_nodes:all_running(),
+ NumRunning = length(Running),
+ ToRebalance = [Q || Q <- rabbit_amqqueue:list(),
+ filter_per_type(Type, Q),
+ is_replicated(Q),
+ is_match(amqqueue:get_vhost(Q), VhostSpec) andalso
+ is_match(get_resource_name(amqqueue:get_name(Q)), QueueSpec)],
+ NumToRebalance = length(ToRebalance),
+ ByNode = group_by_node(ToRebalance),
+ Rem = case (NumToRebalance rem NumRunning) of
+ 0 -> 0;
+ _ -> 1
+ end,
+ MaxQueuesDesired = (NumToRebalance div NumRunning) + Rem,
+ Result = iterative_rebalance(ByNode, MaxQueuesDesired),
+ global:del_lock(Id),
+ rabbit_log:info("Finished queue rebalance operation"),
+ Result;
+maybe_rebalance(false, _Type, _VhostSpec, _QueueSpec) ->
+ rabbit_log:warning("Queue rebalance operation is in progress, please wait."),
+ {error, rebalance_in_progress}.
+
+filter_per_type(all, _) ->
+ true;
+filter_per_type(quorum, Q) ->
+ ?amqqueue_is_quorum(Q);
+filter_per_type(classic, Q) ->
+ ?amqqueue_is_classic(Q).
+
+rebalance_module(Q) when ?amqqueue_is_quorum(Q) ->
+ rabbit_quorum_queue;
+rebalance_module(Q) when ?amqqueue_is_classic(Q) ->
+ rabbit_mirror_queue_misc.
+
+get_resource_name(#resource{name = Name}) ->
+ Name.
+
+is_match(Subj, E) ->
+ nomatch /= re:run(Subj, E).
+
+iterative_rebalance(ByNode, MaxQueuesDesired) ->
+ case maybe_migrate(ByNode, MaxQueuesDesired) of
+ {ok, Summary} ->
+ rabbit_log:info("All queue masters are balanced"),
+ {ok, Summary};
+ {migrated, Other} ->
+ iterative_rebalance(Other, MaxQueuesDesired);
+ {not_migrated, Other} ->
+ iterative_rebalance(Other, MaxQueuesDesired)
+ end.
+
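+%% One pass over the nodes: when a node hosts more than MaxQueuesDesired
+%% queues and the queue at the head of its list has not been migrated yet,
+%% its leadership is transferred to the replica node currently hosting the
+%% fewest queues; once the node list is exhausted a per-node summary is
+%% returned.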
+maybe_migrate(ByNode, MaxQueuesDesired) ->
+ maybe_migrate(ByNode, MaxQueuesDesired, maps:keys(ByNode)).
+
+maybe_migrate(ByNode, _, []) ->
+ {ok, maps:fold(fun(K, V, Acc) ->
+ {CQs, QQs} = lists:partition(fun({_, Q, _}) ->
+ ?amqqueue_is_classic(Q)
+ end, V),
+ [[{<<"Node name">>, K}, {<<"Number of quorum queues">>, length(QQs)},
+ {<<"Number of classic queues">>, length(CQs)}] | Acc]
+ end, [], ByNode)};
+maybe_migrate(ByNode, MaxQueuesDesired, [N | Nodes]) ->
+ case maps:get(N, ByNode, []) of
+ [{_, Q, false} = Queue | Queues] = All when length(All) > MaxQueuesDesired ->
+ Name = amqqueue:get_name(Q),
+ Module = rebalance_module(Q),
+ OtherNodes = Module:get_replicas(Q) -- [N],
+ case OtherNodes of
+ [] ->
+ {not_migrated, update_not_migrated_queue(N, Queue, Queues, ByNode)};
+ _ ->
+ [{Length, Destination} | _] = sort_by_number_of_queues(OtherNodes, ByNode),
+ rabbit_log:warning("Migrating queue ~p from node ~p with ~p queues to node ~p with ~p queues",
+ [Name, N, length(All), Destination, Length]),
+ case Module:transfer_leadership(Q, Destination) of
+ {migrated, NewNode} ->
+ rabbit_log:warning("Queue ~p migrated to ~p", [Name, NewNode]),
+ {migrated, update_migrated_queue(Destination, N, Queue, Queues, ByNode)};
+ {not_migrated, Reason} ->
+ rabbit_log:warning("Error migrating queue ~p: ~p", [Name, Reason]),
+ {not_migrated, update_not_migrated_queue(N, Queue, Queues, ByNode)}
+ end
+ end;
+ [{_, _, true} | _] = All when length(All) > MaxQueuesDesired ->
+ rabbit_log:warning("Node ~p contains ~p queues, but all have already migrated. "
+ "Do nothing", [N, length(All)]),
+ maybe_migrate(ByNode, MaxQueuesDesired, Nodes);
+ All ->
+ rabbit_log:warning("Node ~p only contains ~p queues, do nothing",
+ [N, length(All)]),
+ maybe_migrate(ByNode, MaxQueuesDesired, Nodes)
+ end.
+
+update_not_migrated_queue(N, {Entries, Q, _}, Queues, ByNode) ->
+ maps:update(N, Queues ++ [{Entries, Q, true}], ByNode).
+
+update_migrated_queue(NewNode, OldNode, {Entries, Q, _}, Queues, ByNode) ->
+ maps:update_with(NewNode,
+ fun(L) -> L ++ [{Entries, Q, true}] end,
+ [{Entries, Q, true}], maps:update(OldNode, Queues, ByNode)).
+
+sort_by_number_of_queues(Nodes, ByNode) ->
+ lists:keysort(1,
+ lists:map(fun(Node) ->
+ {num_queues(Node, ByNode), Node}
+ end, Nodes)).
+
+num_queues(Node, ByNode) ->
+ length(maps:get(Node, ByNode, [])).
+
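+%% Groups the queues to rebalance by their home node; every entry is tagged
+%% with its queue length and a "migrated" flag, sorted by length ascending.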
+group_by_node(Queues) ->
+ ByNode = lists:foldl(fun(Q, Acc) ->
+ Module = rebalance_module(Q),
+ Length = Module:queue_length(Q),
+ maps:update_with(amqqueue:qnode(Q),
+ fun(L) -> [{Length, Q, false} | L] end,
+ [{Length, Q, false}], Acc)
+ end, #{}, Queues),
+ maps:map(fun(_K, V) -> lists:keysort(1, V) end, ByNode).
+
+-spec with(name(),
+ qfun(A),
+ fun((not_found_or_absent()) -> rabbit_types:channel_exit())) ->
+ A | rabbit_types:channel_exit().
+
+with(Name, F, E) ->
+ with(Name, F, E, 2000).
+
+with(#resource{} = Name, F, E, RetriesLeft) ->
+ case lookup(Name) of
+ {ok, Q} when ?amqqueue_state_is(Q, live) andalso RetriesLeft =:= 0 ->
+ %% Something bad happened to that queue, we are bailing out
+ %% on processing current request.
+ E({absent, Q, timeout});
+ {ok, Q} when ?amqqueue_state_is(Q, stopped) andalso RetriesLeft =:= 0 ->
+ %% The queue was stopped and not migrated
+ E({absent, Q, stopped});
+ %% The queue process has crashed with unknown error
+ {ok, Q} when ?amqqueue_state_is(Q, crashed) ->
+ E({absent, Q, crashed});
+ %% The queue process has been stopped by a supervisor.
+ %% In that case a synchronised mirror can take over
+ %% so we should retry.
+ {ok, Q} when ?amqqueue_state_is(Q, stopped) ->
+ %% The queue process was stopped by the supervisor
+ rabbit_misc:with_exit_handler(
+ fun () -> retry_wait(Q, F, E, RetriesLeft) end,
+ fun () -> F(Q) end);
+ %% The queue is supposed to be active.
+ %% The master node can go away or queue can be killed
+ %% so we retry, waiting for a mirror to take over.
+ {ok, Q} when ?amqqueue_state_is(Q, live) ->
+ %% We check is_process_alive(QPid) in case we receive a
+ %% nodedown (for example) in F() that has nothing to do
+            %% with the QPid. F() should be written s.t. this
+ %% cannot happen, so we bail if it does since that
+ %% indicates a code bug and we don't want to get stuck in
+ %% the retry loop.
+ rabbit_misc:with_exit_handler(
+ fun () -> retry_wait(Q, F, E, RetriesLeft) end,
+ fun () -> F(Q) end);
+ {error, not_found} ->
+ E(not_found_or_absent_dirty(Name))
+ end.
+
+-spec retry_wait(amqqueue:amqqueue(),
+ qfun(A),
+ fun((not_found_or_absent()) -> rabbit_types:channel_exit()),
+ non_neg_integer()) ->
+ A | rabbit_types:channel_exit().
+
+retry_wait(Q, F, E, RetriesLeft) ->
+ Name = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ QState = amqqueue:get_state(Q),
+ case {QState, is_replicated(Q)} of
+ %% We don't want to repeat an operation if
+ %% there are no mirrors to migrate to
+ {stopped, false} ->
+ E({absent, Q, stopped});
+ _ ->
+ case rabbit_mnesia:is_process_alive(QPid) of
+ true ->
+ % rabbitmq-server#1682
+                    % The old check would have crashed here; instead, log it
+                    % and run the exit fun. "absent & alive" is weird, but
+                    % better than crashing with a badmatch on 'true'.
+ rabbit_log:debug("Unexpected alive queue process ~p~n", [QPid]),
+ E({absent, Q, alive});
+ false ->
+ ok % Expected result
+ end,
+ timer:sleep(30),
+ with(Name, F, E, RetriesLeft - 1)
+ end.
+
+-spec with(name(), qfun(A)) ->
+ A | rabbit_types:error(not_found_or_absent()).
+
+with(Name, F) -> with(Name, F, fun (E) -> {error, E} end).
+
+-spec with_or_die(name(), qfun(A)) -> A | rabbit_types:channel_exit().
+
+with_or_die(Name, F) ->
+ with(Name, F, die_fun(Name)).
+
+-spec die_fun(name()) ->
+ fun((not_found_or_absent()) -> rabbit_types:channel_exit()).
+
+die_fun(Name) ->
+ fun (not_found) -> not_found(Name);
+ ({absent, Q, Reason}) -> absent(Q, Reason)
+ end.
+
+-spec not_found(name()) -> rabbit_types:channel_exit().
+
+not_found(R) -> rabbit_misc:protocol_error(not_found, "no ~s", [rabbit_misc:rs(R)]).
+
+-spec absent(amqqueue:amqqueue(), absent_reason()) ->
+ rabbit_types:channel_exit().
+
+absent(Q, AbsentReason) ->
+ QueueName = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ IsDurable = amqqueue:is_durable(Q),
+ priv_absent(QueueName, QPid, IsDurable, AbsentReason).
+
+-spec priv_absent(name(), pid(), boolean(), absent_reason()) ->
+ rabbit_types:channel_exit().
+
+priv_absent(QueueName, QPid, true, nodedown) ->
+ %% The assertion of durability is mainly there because we mention
+ %% durability in the error message. That way we will hopefully
+ %% notice if at some future point our logic changes s.t. we get
+ %% here with non-durable queues.
+ rabbit_misc:protocol_error(
+ not_found,
+ "home node '~s' of durable ~s is down or inaccessible",
+ [node(QPid), rabbit_misc:rs(QueueName)]);
+
+priv_absent(QueueName, _QPid, _IsDurable, stopped) ->
+ rabbit_misc:protocol_error(
+ not_found,
+ "~s process is stopped by supervisor", [rabbit_misc:rs(QueueName)]);
+
+priv_absent(QueueName, _QPid, _IsDurable, crashed) ->
+ rabbit_misc:protocol_error(
+ not_found,
+ "~s has crashed and failed to restart", [rabbit_misc:rs(QueueName)]);
+
+priv_absent(QueueName, _QPid, _IsDurable, timeout) ->
+ rabbit_misc:protocol_error(
+ not_found,
+ "failed to perform operation on ~s due to timeout", [rabbit_misc:rs(QueueName)]);
+
+priv_absent(QueueName, QPid, _IsDurable, alive) ->
+ rabbit_misc:protocol_error(
+ not_found,
+ "failed to perform operation on ~s: its master replica ~w may be stopping or being demoted",
+ [rabbit_misc:rs(QueueName), QPid]).
+
+-spec assert_equivalence
+ (amqqueue:amqqueue(), boolean(), boolean(),
+ rabbit_framing:amqp_table(), rabbit_types:maybe(pid())) ->
+ 'ok' | rabbit_types:channel_exit() | rabbit_types:connection_exit().
+
+assert_equivalence(Q, DurableDeclare, AutoDeleteDeclare, Args1, Owner) ->
+ QName = amqqueue:get_name(Q),
+ DurableQ = amqqueue:is_durable(Q),
+ AutoDeleteQ = amqqueue:is_auto_delete(Q),
+ ok = check_exclusive_access(Q, Owner, strict),
+ ok = rabbit_misc:assert_field_equivalence(DurableQ, DurableDeclare, QName, durable),
+ ok = rabbit_misc:assert_field_equivalence(AutoDeleteQ, AutoDeleteDeclare, QName, auto_delete),
+ ok = assert_args_equivalence(Q, Args1).
+
+-spec check_exclusive_access(amqqueue:amqqueue(), pid()) ->
+ 'ok' | rabbit_types:channel_exit().
+
+check_exclusive_access(Q, Owner) -> check_exclusive_access(Q, Owner, lax).
+
+check_exclusive_access(Q, Owner, _MatchType)
+ when ?amqqueue_exclusive_owner_is(Q, Owner) ->
+ ok;
+check_exclusive_access(Q, _ReaderPid, lax)
+ when ?amqqueue_exclusive_owner_is(Q, none) ->
+ ok;
+check_exclusive_access(Q, _ReaderPid, _MatchType) ->
+ QueueName = amqqueue:get_name(Q),
+ rabbit_misc:protocol_error(
+ resource_locked,
+ "cannot obtain exclusive access to locked ~s. It could be originally "
+ "declared on another connection or the exclusive property value does not "
+ "match that of the original declaration.",
+ [rabbit_misc:rs(QueueName)]).
+
+-spec with_exclusive_access_or_die(name(), pid(), qfun(A)) ->
+ A | rabbit_types:channel_exit().
+
+with_exclusive_access_or_die(Name, ReaderPid, F) ->
+ with_or_die(Name,
+ fun (Q) -> check_exclusive_access(Q, ReaderPid), F(Q) end).
+
+assert_args_equivalence(Q, RequiredArgs) ->
+ QueueName = amqqueue:get_name(Q),
+ Args = amqqueue:get_arguments(Q),
+ rabbit_misc:assert_args_equivalence(Args, RequiredArgs, QueueName,
+ [Key || {Key, _Fun} <- declare_args()]).
+
+check_declare_arguments(QueueName, Args) ->
+ check_arguments(QueueName, Args, declare_args()).
+
+check_consume_arguments(QueueName, Args) ->
+ check_arguments(QueueName, Args, consume_args()).
+
+check_arguments(QueueName, Args, Validators) ->
+ [case rabbit_misc:table_lookup(Args, Key) of
+ undefined -> ok;
+ TypeVal -> case Fun(TypeVal, Args) of
+ ok -> ok;
+ {error, Error} -> rabbit_misc:protocol_error(
+ precondition_failed,
+ "invalid arg '~s' for ~s: ~255p",
+ [Key, rabbit_misc:rs(QueueName),
+ Error])
+ end
+ end || {Key, Fun} <- Validators],
+ ok.
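+%% Illustrative example (editorial note, assuming `long` is listed in
+%% ?INTEGER_ARG_TYPES): for Args = [{<<"x-max-length">>, long, 1000}],
+%% rabbit_misc:table_lookup(Args, <<"x-max-length">>) returns {long, 1000},
+%% check_non_neg_int_arg({long, 1000}, Args) returns ok and the declaration
+%% is accepted; a value of -1 would fail with {value_negative, -1}.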
+
+declare_args() ->
+ [{<<"x-expires">>, fun check_expires_arg/2},
+ {<<"x-message-ttl">>, fun check_message_ttl_arg/2},
+ {<<"x-dead-letter-exchange">>, fun check_dlxname_arg/2},
+ {<<"x-dead-letter-routing-key">>, fun check_dlxrk_arg/2},
+ {<<"x-max-length">>, fun check_non_neg_int_arg/2},
+ {<<"x-max-length-bytes">>, fun check_non_neg_int_arg/2},
+ {<<"x-max-in-memory-length">>, fun check_non_neg_int_arg/2},
+ {<<"x-max-in-memory-bytes">>, fun check_non_neg_int_arg/2},
+ {<<"x-max-priority">>, fun check_max_priority_arg/2},
+ {<<"x-overflow">>, fun check_overflow/2},
+ {<<"x-queue-mode">>, fun check_queue_mode/2},
+ {<<"x-single-active-consumer">>, fun check_single_active_consumer_arg/2},
+ {<<"x-queue-type">>, fun check_queue_type/2},
+ {<<"x-quorum-initial-group-size">>, fun check_initial_cluster_size_arg/2},
+ {<<"x-max-age">>, fun check_max_age_arg/2},
+ {<<"x-max-segment-size">>, fun check_non_neg_int_arg/2},
+ {<<"x-initial-cluster-size">>, fun check_initial_cluster_size_arg/2},
+ {<<"x-queue-leader-locator">>, fun check_queue_leader_locator_arg/2}].
+
+consume_args() -> [{<<"x-priority">>, fun check_int_arg/2},
+ {<<"x-cancel-on-ha-failover">>, fun check_bool_arg/2}].
+
+check_int_arg({Type, _}, _) ->
+ case lists:member(Type, ?INTEGER_ARG_TYPES) of
+ true -> ok;
+ false -> {error, {unacceptable_type, Type}}
+ end.
+
+check_bool_arg({bool, _}, _) -> ok;
+check_bool_arg({Type, _}, _) -> {error, {unacceptable_type, Type}}.
+
+check_non_neg_int_arg({Type, Val}, Args) ->
+ case check_int_arg({Type, Val}, Args) of
+ ok when Val >= 0 -> ok;
+ ok -> {error, {value_negative, Val}};
+ Error -> Error
+ end.
+
+check_expires_arg({Type, Val}, Args) ->
+ case check_int_arg({Type, Val}, Args) of
+ ok when Val == 0 -> {error, {value_zero, Val}};
+ ok -> rabbit_misc:check_expiry(Val);
+ Error -> Error
+ end.
+
+check_message_ttl_arg({Type, Val}, Args) ->
+ case check_int_arg({Type, Val}, Args) of
+ ok -> rabbit_misc:check_expiry(Val);
+ Error -> Error
+ end.
+
+check_max_priority_arg({Type, Val}, Args) ->
+ case check_non_neg_int_arg({Type, Val}, Args) of
+ ok when Val =< ?MAX_SUPPORTED_PRIORITY -> ok;
+ ok -> {error, {max_value_exceeded, Val}};
+ Error -> Error
+ end.
+
+check_single_active_consumer_arg({Type, Val}, Args) ->
+ case check_bool_arg({Type, Val}, Args) of
+ ok -> ok;
+ Error -> Error
+ end.
+
+check_initial_cluster_size_arg({Type, Val}, Args) ->
+ case check_non_neg_int_arg({Type, Val}, Args) of
+ ok when Val == 0 -> {error, {value_zero, Val}};
+ ok -> ok;
+ Error -> Error
+ end.
+
+check_max_age_arg({longstr, Val}, _Args) ->
+ case check_max_age(Val) of
+ {error, _} = E ->
+ E;
+ _ ->
+ ok
+ end;
+check_max_age_arg({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+check_max_age(MaxAge) ->
+ case re:run(MaxAge, "(^[0-9]*)(.*)", [{capture, all_but_first, list}]) of
+ {match, [Value, Unit]} ->
+ case list_to_integer(Value) of
+ I when I > 0 ->
+ case lists:member(Unit, ["Y", "M", "D", "h", "m", "s"]) of
+ true ->
+ Int = list_to_integer(Value),
+ Int * unit_value_in_ms(Unit);
+ false ->
+ {error, invalid_max_age}
+ end;
+ _ ->
+ {error, invalid_max_age}
+ end;
+ _ ->
+ {error, invalid_max_age}
+ end.
+
+unit_value_in_ms("Y") ->
+ 365 * unit_value_in_ms("D");
+unit_value_in_ms("M") ->
+ 30 * unit_value_in_ms("D");
+unit_value_in_ms("D") ->
+ 24 * unit_value_in_ms("h");
+unit_value_in_ms("h") ->
+ 3600 * unit_value_in_ms("s");
+unit_value_in_ms("m") ->
+ 60 * unit_value_in_ms("s");
+unit_value_in_ms("s") ->
+ 1000.
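+%% Worked example: check_max_age(<<"7D">>) captures Value = "7" and Unit = "D",
+%% and returns 7 * 24 * 3600 * 1000 = 604800000 (milliseconds). Any unit other
+%% than "Y", "M", "D", "h", "m" or "s" yields {error, invalid_max_age}.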
+
+%% Note that the validity of x-dead-letter-exchange is already verified
+%% by rabbit_channel's queue.declare handler.
+check_dlxname_arg({longstr, _}, _) -> ok;
+check_dlxname_arg({Type, _}, _) -> {error, {unacceptable_type, Type}}.
+
+check_dlxrk_arg({longstr, _}, Args) ->
+ case rabbit_misc:table_lookup(Args, <<"x-dead-letter-exchange">>) of
+ undefined -> {error, routing_key_but_no_dlx_defined};
+ _ -> ok
+ end;
+check_dlxrk_arg({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+check_overflow({longstr, Val}, _Args) ->
+ case lists:member(Val, [<<"drop-head">>,
+ <<"reject-publish">>,
+ <<"reject-publish-dlx">>]) of
+ true -> ok;
+ false -> {error, invalid_overflow}
+ end;
+check_overflow({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+check_queue_leader_locator_arg({longstr, Val}, _Args) ->
+ case lists:member(Val, [<<"client-local">>,
+ <<"random">>,
+ <<"least-leaders">>]) of
+ true -> ok;
+ false -> {error, invalid_queue_locator_arg}
+ end;
+check_queue_leader_locator_arg({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+check_queue_mode({longstr, Val}, _Args) ->
+ case lists:member(Val, [<<"default">>, <<"lazy">>]) of
+ true -> ok;
+ false -> {error, invalid_queue_mode}
+ end;
+check_queue_mode({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+check_queue_type({longstr, Val}, _Args) ->
+ case lists:member(Val, [<<"classic">>, <<"quorum">>, <<"stream">>]) of
+ true -> ok;
+ false -> {error, invalid_queue_type}
+ end;
+check_queue_type({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+-spec list() -> [amqqueue:amqqueue()].
+
+list() ->
+ list_with_possible_retry(fun do_list/0).
+
+do_list() ->
+ mnesia:dirty_match_object(rabbit_queue, amqqueue:pattern_match_all()).
+
+-spec count() -> non_neg_integer().
+
+count() ->
+ mnesia:table_info(rabbit_queue, size).
+
+-spec list_names() -> [rabbit_amqqueue:name()].
+
+list_names() -> mnesia:dirty_all_keys(rabbit_queue).
+
+list_names(VHost) -> [amqqueue:get_name(Q) || Q <- list(VHost)].
+
+list_local_names() ->
+ [ amqqueue:get_name(Q) || Q <- list(),
+ amqqueue:get_state(Q) =/= crashed, is_local_to_node(amqqueue:get_pid(Q), node())].
+
+list_local_names_down() ->
+ [ amqqueue:get_name(Q) || Q <- list(),
+ is_down(Q),
+ is_local_to_node(amqqueue:get_pid(Q), node())].
+
+is_down(Q) ->
+ try
+ info(Q, [state]) == [{state, down}]
+ catch
+ _:_ ->
+ true
+ end.
+
+
+-spec sample_local_queues() -> [amqqueue:amqqueue()].
+sample_local_queues() -> sample_n_by_name(list_local_names(), 300).
+
+-spec sample_n_by_name([rabbit_amqqueue:name()], pos_integer()) -> [amqqueue:amqqueue()].
+sample_n_by_name([], _N) ->
+ [];
+sample_n_by_name(Names, N) when is_list(Names) andalso is_integer(N) andalso N > 0 ->
+ %% lists:nth/2 throws when position is > list length
+ M = erlang:min(N, length(Names)),
+ Ids = lists:foldl(fun( _, Acc) when length(Acc) >= 100 ->
+ Acc;
+ (_, Acc) ->
+ Pick = lists:nth(rand:uniform(M), Names),
+ [Pick | Acc]
+ end,
+ [], lists:seq(1, M)),
+ lists:map(fun (Id) ->
+ {ok, Q} = rabbit_amqqueue:lookup(Id),
+ Q
+ end,
+ lists:usort(Ids)).
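+%% Note: the sampling above is done with replacement over the first M names
+%% (at most 100 picks) and the picks are deduplicated with lists:usort/1,
+%% so the result may contain fewer than N queues.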
+
+-spec sample_n([amqqueue:amqqueue()], pos_integer()) -> [amqqueue:amqqueue()].
+sample_n([], _N) ->
+ [];
+sample_n(Queues, N) when is_list(Queues) andalso is_integer(N) andalso N > 0 ->
+ Names = [amqqueue:get_name(Q) || Q <- Queues],
+ sample_n_by_name(Names, N).
+
+
+-spec list_by_type(atom()) -> [amqqueue:amqqueue()].
+
+list_by_type(classic) -> list_by_type(rabbit_classic_queue);
+list_by_type(quorum) -> list_by_type(rabbit_quorum_queue);
+list_by_type(Type) ->
+ {atomic, Qs} =
+ mnesia:sync_transaction(
+ fun () ->
+ mnesia:match_object(rabbit_durable_queue,
+ amqqueue:pattern_match_on_type(Type),
+ read)
+ end),
+ Qs.
+
+-spec list_local_quorum_queue_names() -> [rabbit_amqqueue:name()].
+
+list_local_quorum_queue_names() ->
+ [ amqqueue:get_name(Q) || Q <- list_by_type(quorum),
+ amqqueue:get_state(Q) =/= crashed,
+ lists:member(node(), get_quorum_nodes(Q))].
+
+-spec list_local_quorum_queues() -> [amqqueue:amqqueue()].
+list_local_quorum_queues() ->
+ [ Q || Q <- list_by_type(quorum),
+ amqqueue:get_state(Q) =/= crashed,
+ lists:member(node(), get_quorum_nodes(Q))].
+
+-spec list_local_leaders() -> [amqqueue:amqqueue()].
+list_local_leaders() ->
+ [ Q || Q <- list(),
+ amqqueue:is_quorum(Q),
+ amqqueue:get_state(Q) =/= crashed, amqqueue:get_leader(Q) =:= node()].
+
+-spec list_local_followers() -> [amqqueue:amqqueue()].
+list_local_followers() ->
+ [Q
+ || Q <- list(),
+ amqqueue:is_quorum(Q),
+ amqqueue:get_state(Q) =/= crashed,
+ amqqueue:get_leader(Q) =/= node(),
+ rabbit_quorum_queue:is_recoverable(Q)
+ ].
+
+-spec list_local_mirrored_classic_queues() -> [amqqueue:amqqueue()].
+list_local_mirrored_classic_queues() ->
+ [ Q || Q <- list(),
+ amqqueue:get_state(Q) =/= crashed,
+ amqqueue:is_classic(Q),
+ is_local_to_node(amqqueue:get_pid(Q), node()),
+ is_replicated(Q)].
+
+-spec list_local_mirrored_classic_names() -> [rabbit_amqqueue:name()].
+list_local_mirrored_classic_names() ->
+ [ amqqueue:get_name(Q) || Q <- list(),
+ amqqueue:get_state(Q) =/= crashed,
+ amqqueue:is_classic(Q),
+ is_local_to_node(amqqueue:get_pid(Q), node()),
+ is_replicated(Q)].
+
+-spec list_local_mirrored_classic_without_synchronised_mirrors() ->
+ [amqqueue:amqqueue()].
+list_local_mirrored_classic_without_synchronised_mirrors() ->
+ [ Q || Q <- list(),
+ amqqueue:get_state(Q) =/= crashed,
+ amqqueue:is_classic(Q),
+ %% filter out exclusive queues as they won't actually be mirrored
+ is_not_exclusive(Q),
+ is_local_to_node(amqqueue:get_pid(Q), node()),
+ is_replicated(Q),
+ not has_synchronised_mirrors_online(Q)].
+
+-spec list_local_mirrored_classic_without_synchronised_mirrors_for_cli() ->
+ [#{binary() => any()}].
+list_local_mirrored_classic_without_synchronised_mirrors_for_cli() ->
+ ClassicQs = list_local_mirrored_classic_without_synchronised_mirrors(),
+ [begin
+ #resource{name = Name} = amqqueue:get_name(Q),
+ #{
+ <<"readable_name">> => rabbit_data_coercion:to_binary(rabbit_misc:rs(amqqueue:get_name(Q))),
+ <<"name">> => Name,
+ <<"virtual_host">> => amqqueue:get_vhost(Q),
+ <<"type">> => <<"classic">>
+ }
+ end || Q <- ClassicQs].
+
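+%% Classic queue pids are regular Erlang pids, so locality is determined by
+%% the pid's node; quorum queue "pids" are {_, LeaderNode} tuples, so locality
+%% is checked against the leader node.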
+is_local_to_node(QPid, Node) when ?IS_CLASSIC(QPid) ->
+ Node =:= node(QPid);
+is_local_to_node({_, Leader} = QPid, Node) when ?IS_QUORUM(QPid) ->
+ Node =:= Leader.
+
+-spec list(rabbit_types:vhost()) -> [amqqueue:amqqueue()].
+
+list(VHostPath) ->
+ list(VHostPath, rabbit_queue).
+
+list(VHostPath, TableName) ->
+ list_with_possible_retry(fun() -> do_list(VHostPath, TableName) end).
+
+%% Not dirty_match_object since that would not be transactional when used in a
+%% tx context
+do_list(VHostPath, TableName) ->
+ mnesia:async_dirty(
+ fun () ->
+ mnesia:match_object(
+ TableName,
+ amqqueue:pattern_match_on_name(rabbit_misc:r(VHostPath, queue)),
+ read)
+ end).
+
+list_with_possible_retry(Fun) ->
+ %% amqqueue migration:
+ %% The `rabbit_queue` or `rabbit_durable_queue` tables
+ %% might be migrated between the time we query the pattern
+ %% (with the `amqqueue` module) and the time we call
+ %% `mnesia:dirty_match_object()`. This would lead to an empty list
+ %% (no object matching the now incorrect pattern), not a Mnesia
+ %% error.
+ %%
+ %% So if the result is an empty list and the version of the
+ %% `amqqueue` record changed in between, we retry the operation.
+ %%
+ %% However, we don't do this if inside a Mnesia transaction: we
+ %% could end up with a live lock between this started transaction
+ %% and the Mnesia table migration which is blocked (but the
+ %% rabbit_feature_flags lock is held).
+ AmqqueueRecordVersion = amqqueue:record_version_to_use(),
+ case Fun() of
+ [] ->
+ case mnesia:is_transaction() of
+ true ->
+ [];
+ false ->
+ case amqqueue:record_version_to_use() of
+ AmqqueueRecordVersion -> [];
+ _ -> Fun()
+ end
+ end;
+ Ret ->
+ Ret
+ end.
+
+-spec list_down(rabbit_types:vhost()) -> [amqqueue:amqqueue()].
+
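+%% Returns durable queues of the vhost that are recorded in
+%% rabbit_durable_queue but currently absent from rabbit_queue,
+%% i.e. queues that are known but not running.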
+list_down(VHostPath) ->
+ case rabbit_vhost:exists(VHostPath) of
+ false -> [];
+ true ->
+ Present = list(VHostPath),
+ Durable = list(VHostPath, rabbit_durable_queue),
+ PresentS = sets:from_list([amqqueue:get_name(Q) || Q <- Present]),
+ sets:to_list(sets:filter(fun (Q) ->
+ N = amqqueue:get_name(Q),
+ not sets:is_element(N, PresentS)
+ end, sets:from_list(Durable)))
+ end.
+
+count(VHost) ->
+ try
+ %% this is certainly suboptimal but there is no way to count
+ %% things using a secondary index in Mnesia. Our counter-table-per-node
+ %% won't work here because with master migration of mirrored queues
+ %% the "ownership" of queues by nodes becomes a non-trivial problem
+ %% that requires a proper consensus algorithm.
+ length(list_for_count(VHost))
+ catch _:Err ->
+ rabbit_log:error("Failed to fetch number of queues in vhost ~p:~n~p~n",
+ [VHost, Err]),
+ 0
+ end.
+
+list_for_count(VHost) ->
+ list_with_possible_retry(
+ fun() ->
+ mnesia:dirty_index_read(rabbit_queue,
+ VHost,
+ amqqueue:field_vhost())
+ end).
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+%% This should not default to classic queue keys, but to a subset of keys that must
+%% be shared by all queue types. It is unclear whether this is still being used, so it
+%% is left here for backwards compatibility. Each queue type now handles
+%% info(Q, all_keys) with the keys it supports.
+info_keys() -> rabbit_amqqueue_process:info_keys().
+
+map(Qs, F) -> rabbit_misc:filter_exit_map(F, Qs).
+
+is_unresponsive(Q, _Timeout) when ?amqqueue_state_is(Q, crashed) ->
+ false;
+is_unresponsive(Q, Timeout) when ?amqqueue_is_classic(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ try
+ delegate:invoke(QPid, {gen_server2, call, [{info, [name]}, Timeout]}),
+ false
+ catch
+ %% TODO catch any exit??
+ exit:{timeout, _} ->
+ true
+ end;
+is_unresponsive(Q, Timeout) when ?amqqueue_is_quorum(Q) ->
+ try
+ Leader = amqqueue:get_pid(Q),
+ case rabbit_fifo_client:stat(Leader, Timeout) of
+ {ok, _, _} -> false;
+ {timeout, _} -> true;
+ {error, _} -> true
+ end
+ catch
+ exit:{timeout, _} ->
+ true
+ end.
+
+format(Q) when ?amqqueue_is_quorum(Q) -> rabbit_quorum_queue:format(Q);
+format(Q) -> rabbit_amqqueue_process:format(Q).
+
+-spec info(amqqueue:amqqueue()) -> rabbit_types:infos().
+
+info(Q) when ?is_amqqueue(Q) -> rabbit_queue_type:info(Q, all_keys).
+
+
+-spec info(amqqueue:amqqueue(), rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+
+info(Q, Items) when ?is_amqqueue(Q) ->
+ rabbit_queue_type:info(Q, Items).
+
+info_down(Q, DownReason) ->
+ rabbit_queue_type:info_down(Q, DownReason).
+
+info_down(Q, Items, DownReason) ->
+ rabbit_queue_type:info_down(Q, Items, DownReason).
+
+-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+
+info_all(VHostPath) ->
+ map(list(VHostPath), fun (Q) -> info(Q) end) ++
+ map(list_down(VHostPath), fun (Q) -> info_down(Q, down) end).
+
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys()) ->
+ [rabbit_types:infos()].
+
+info_all(VHostPath, Items) ->
+ map(list(VHostPath), fun (Q) -> info(Q, Items) end) ++
+ map(list_down(VHostPath), fun (Q) -> info_down(Q, Items, down) end).
+
+emit_info_local(VHostPath, Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(Q) -> info(Q, Items) end, list_local(VHostPath)).
+
+emit_info_all(Nodes, VHostPath, Items, Ref, AggregatorPid) ->
+ Pids = [ spawn_link(Node, rabbit_amqqueue, emit_info_local, [VHostPath, Items, Ref, AggregatorPid]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids).
+
+collect_info_all(VHostPath, Items) ->
+ Nodes = rabbit_nodes:all_running(),
+ Ref = make_ref(),
+ Pids = [ spawn_link(Node, rabbit_amqqueue, emit_info_local, [VHostPath, Items, Ref, self()]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids),
+ wait_for_queues(Ref, length(Pids), []).
+
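+%% Accumulates emitted rows until every emitter has reported {Ref, finished},
+%% giving up and returning what was collected after 1 second without messages.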
+wait_for_queues(Ref, N, Acc) ->
+ receive
+ {Ref, finished} when N == 1 ->
+ Acc;
+ {Ref, finished} ->
+ wait_for_queues(Ref, N - 1, Acc);
+ {Ref, Items, continue} ->
+ wait_for_queues(Ref, N, [Items | Acc])
+ after
+ 1000 ->
+ Acc
+ end.
+
+emit_info_down(VHostPath, Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(Q) -> info_down(Q, Items, down) end,
+ list_down(VHostPath)).
+
+emit_unresponsive_local(VHostPath, Items, Timeout, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(Q) -> case is_unresponsive(Q, Timeout) of
+ true -> info_down(Q, Items, unresponsive);
+ false -> []
+ end
+ end, list_local(VHostPath)
+ ).
+
+emit_unresponsive(Nodes, VHostPath, Items, Timeout, Ref, AggregatorPid) ->
+ Pids = [ spawn_link(Node, rabbit_amqqueue, emit_unresponsive_local,
+ [VHostPath, Items, Timeout, Ref, AggregatorPid]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids).
+
+info_local(VHostPath) ->
+ map(list_local(VHostPath), fun (Q) -> info(Q, [name]) end).
+
+list_local(VHostPath) ->
+ [Q || Q <- list(VHostPath),
+ amqqueue:get_state(Q) =/= crashed, is_local_to_node(amqqueue:get_pid(Q), node())].
+
+-spec force_event_refresh(reference()) -> 'ok'.
+
+% Note: https://www.pivotaltracker.com/story/show/166962656
+% This event is necessary for the stats timer to be initialized with
+% the correct values once the management agent has started
+force_event_refresh(Ref) ->
+ %% note: quorum queues emit stats on periodic ticks that run unconditionally,
+ %% so force_event_refresh is unnecessary (and, in fact, would only produce log noise) for QQs.
+ ClassicQs = list_by_type(rabbit_classic_queue),
+ [gen_server2:cast(amqqueue:get_pid(Q),
+ {force_event_refresh, Ref}) || Q <- ClassicQs],
+ ok.
+
+-spec notify_policy_changed(amqqueue:amqqueue()) -> 'ok'.
+notify_policy_changed(Q) when ?is_amqqueue(Q) ->
+ rabbit_queue_type:policy_changed(Q).
+
+-spec consumers(amqqueue:amqqueue()) ->
+ [{pid(), rabbit_types:ctag(), boolean(), non_neg_integer(),
+ boolean(), atom(),
+ rabbit_framing:amqp_table(), rabbit_types:username()}].
+
+consumers(Q) when ?amqqueue_is_classic(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ delegate:invoke(QPid, {gen_server2, call, [consumers, infinity]});
+consumers(Q) when ?amqqueue_is_quorum(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case ra:local_query(QPid, fun rabbit_fifo:query_consumers/1) of
+ {ok, {_, Result}, _} -> maps:values(Result);
+ _ -> []
+ end;
+consumers(Q) when ?amqqueue_is_stream(Q) ->
+ %% TODO how??? they only exist on the channel
+ %% we could list the offset listener on the writer but we don't even have a consumer tag,
+ %% only a (channel) pid and offset
+ [].
+
+-spec consumer_info_keys() -> rabbit_types:info_keys().
+
+consumer_info_keys() -> ?CONSUMER_INFO_KEYS.
+
+-spec consumers_all(rabbit_types:vhost()) ->
+ [{name(), pid(), rabbit_types:ctag(), boolean(),
+ non_neg_integer(), rabbit_framing:amqp_table()}].
+
+consumers_all(VHostPath) ->
+ ConsumerInfoKeys = consumer_info_keys(),
+ lists:append(
+ map(list(VHostPath),
+ fun(Q) -> get_queue_consumer_info(Q, ConsumerInfoKeys) end)).
+
+emit_consumers_all(Nodes, VHostPath, Ref, AggregatorPid) ->
+ Pids = [ spawn_link(Node, rabbit_amqqueue, emit_consumers_local, [VHostPath, Ref, AggregatorPid]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids),
+ ok.
+
+emit_consumers_local(VHostPath, Ref, AggregatorPid) ->
+ ConsumerInfoKeys = consumer_info_keys(),
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref,
+ fun(Q) -> get_queue_consumer_info(Q, ConsumerInfoKeys) end,
+ list_local(VHostPath)).
+
+get_queue_consumer_info(Q, ConsumerInfoKeys) ->
+ [lists:zip(ConsumerInfoKeys,
+ [amqqueue:get_name(Q), ChPid, CTag,
+ AckRequired, Prefetch, Active, ActivityStatus, Args]) ||
+ {ChPid, CTag, AckRequired, Prefetch, Active, ActivityStatus, Args, _} <- consumers(Q)].
+
+-spec stat(amqqueue:amqqueue()) ->
+ {'ok', non_neg_integer(), non_neg_integer()}.
+stat(Q) ->
+ rabbit_queue_type:stat(Q).
+
+-spec pid_of(amqqueue:amqqueue()) ->
+ pid().
+
+pid_of(Q) -> amqqueue:get_pid(Q).
+
+-spec pid_of(rabbit_types:vhost(), rabbit_misc:resource_name()) ->
+ pid() | rabbit_types:error('not_found').
+
+pid_of(VHost, QueueName) ->
+ case lookup(rabbit_misc:r(VHost, queue, QueueName)) of
+ {ok, Q} -> pid_of(Q);
+ {error, not_found} = E -> E
+ end.
+
+-spec delete_exclusive(qpids(), pid()) -> 'ok'.
+
+delete_exclusive(QPids, ConnId) ->
+ rabbit_amqqueue_common:delete_exclusive(QPids, ConnId).
+
+-spec delete_immediately(qpids()) -> 'ok'.
+
+delete_immediately(QPids) ->
+ {Classic, Quorum} = filter_pid_per_type(QPids),
+ [gen_server2:cast(QPid, delete_immediately) || QPid <- Classic],
+ case Quorum of
+ [] -> ok;
+ _ -> {error, cannot_delete_quorum_queues, Quorum}
+ end.
+
+delete_immediately_by_resource(Resources) ->
+ {Classic, Quorum} = filter_resource_per_type(Resources),
+ [gen_server2:cast(QPid, delete_immediately) || {_, QPid} <- Classic],
+ [rabbit_quorum_queue:delete_immediately(Resource, QPid)
+ || {Resource, QPid} <- Quorum],
+ ok.
+
+-spec delete
+ (amqqueue:amqqueue(), 'false', 'false', rabbit_types:username()) ->
+ qlen() |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()};
+ (amqqueue:amqqueue(), 'true' , 'false', rabbit_types:username()) ->
+ qlen() | rabbit_types:error('in_use') |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()};
+ (amqqueue:amqqueue(), 'false', 'true', rabbit_types:username()) ->
+ qlen() | rabbit_types:error('not_empty') |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()};
+ (amqqueue:amqqueue(), 'true' , 'true', rabbit_types:username()) ->
+ qlen() |
+ rabbit_types:error('in_use') |
+ rabbit_types:error('not_empty') |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+delete(Q, IfUnused, IfEmpty, ActingUser) ->
+ rabbit_queue_type:delete(Q, IfUnused, IfEmpty, ActingUser).
+
+%% delete_crashed* INCLUDED FOR BACKWARDS COMPATIBILITY REASONS
+delete_crashed(Q) when ?amqqueue_is_classic(Q) ->
+ rabbit_classic_queue:delete_crashed(Q).
+
+delete_crashed(Q, ActingUser) when ?amqqueue_is_classic(Q) ->
+ rabbit_classic_queue:delete_crashed(Q, ActingUser).
+
+-spec delete_crashed_internal(amqqueue:amqqueue(), rabbit_types:username()) -> 'ok'.
+delete_crashed_internal(Q, ActingUser) when ?amqqueue_is_classic(Q) ->
+ rabbit_classic_queue:delete_crashed_internal(Q, ActingUser).
+
+-spec purge(amqqueue:amqqueue()) -> qlen().
+purge(Q) when ?is_amqqueue(Q) ->
+ rabbit_queue_type:purge(Q).
+
+-spec requeue(name(),
+ {rabbit_fifo:consumer_tag(), [msg_id()]},
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()}.
+requeue(QRef, {CTag, MsgIds}, QStates) ->
+ reject(QRef, true, {CTag, MsgIds}, QStates).
+
+-spec ack(name(),
+ {rabbit_fifo:consumer_tag(), [msg_id()]},
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()}.
+ack(QPid, {CTag, MsgIds}, QueueStates) ->
+ rabbit_queue_type:settle(QPid, complete, CTag, MsgIds, QueueStates).
+
+
+-spec reject(name(),
+ boolean(),
+ {rabbit_fifo:consumer_tag(), [msg_id()]},
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()}.
+reject(QRef, Requeue, {CTag, MsgIds}, QStates) ->
+ Op = case Requeue of
+ true -> requeue;
+ false -> discard
+ end,
+ rabbit_queue_type:settle(QRef, Op, CTag, MsgIds, QStates).
+
+-spec notify_down_all(qpids(), pid()) -> ok_or_errors().
+notify_down_all(QPids, ChPid) ->
+ notify_down_all(QPids, ChPid, ?CHANNEL_OPERATION_TIMEOUT).
+
+-spec notify_down_all(qpids(), pid(), non_neg_integer()) ->
+ ok_or_errors().
+notify_down_all(QPids, ChPid, Timeout) ->
+ case rpc:call(node(), delegate, invoke,
+ [QPids, {gen_server2, call, [{notify_down, ChPid}, infinity]}], Timeout) of
+ {badrpc, timeout} -> {error, {channel_operation_timeout, Timeout}};
+ {badrpc, Reason} -> {error, Reason};
+ {_, Bads} ->
+ case lists:filter(
+ fun ({_Pid, {exit, {R, _}, _}}) ->
+ rabbit_misc:is_abnormal_exit(R);
+ ({_Pid, _}) -> false
+ end, Bads) of
+ [] -> ok;
+ Bads1 -> {error, Bads1}
+ end;
+ Error -> {error, Error}
+ end.
+
+-spec activate_limit_all(qpids(), pid()) -> ok.
+
+activate_limit_all(QRefs, ChPid) ->
+ QPids = [P || P <- QRefs, ?IS_CLASSIC(P)],
+ delegate:invoke_no_result(QPids, {gen_server2, cast,
+ [{activate_limit, ChPid}]}).
+
+-spec credit(amqqueue:amqqueue(),
+ rabbit_types:ctag(),
+ non_neg_integer(),
+ boolean(),
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()}.
+credit(Q, CTag, Credit, Drain, QStates) ->
+ rabbit_queue_type:credit(Q, CTag, Credit, Drain, QStates).
+
+-spec basic_get(amqqueue:amqqueue(), boolean(), pid(), rabbit_types:ctag(),
+ rabbit_queue_type:state()) ->
+ {'ok', non_neg_integer(), qmsg(), rabbit_queue_type:state()} |
+ {'empty', rabbit_queue_type:state()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+basic_get(Q, NoAck, LimiterPid, CTag, QStates0) ->
+ rabbit_queue_type:dequeue(Q, NoAck, LimiterPid, CTag, QStates0).
+
+
+-spec basic_consume(amqqueue:amqqueue(), boolean(), pid(), pid(), boolean(),
+ non_neg_integer(), rabbit_types:ctag(), boolean(),
+ rabbit_framing:amqp_table(), any(), rabbit_types:username(),
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()} |
+ {error, term()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+basic_consume(Q, NoAck, ChPid, LimiterPid,
+ LimiterActive, ConsumerPrefetchCount, ConsumerTag,
+ ExclusiveConsume, Args, OkMsg, ActingUser, Contexts) ->
+
+ QName = amqqueue:get_name(Q),
+ %% first phase argument validation
+ %% each queue type may do further validations
+ ok = check_consume_arguments(QName, Args),
+ Spec = #{no_ack => NoAck,
+ channel_pid => ChPid,
+ limiter_pid => LimiterPid,
+ limiter_active => LimiterActive,
+ prefetch_count => ConsumerPrefetchCount,
+ consumer_tag => ConsumerTag,
+ exclusive_consume => ExclusiveConsume,
+ args => Args,
+ ok_msg => OkMsg,
+ acting_user => ActingUser},
+ rabbit_queue_type:consume(Q, Spec, Contexts).
+
+-spec basic_cancel(amqqueue:amqqueue(), rabbit_types:ctag(), any(),
+ rabbit_types:username(),
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state()} | {error, term()}.
+basic_cancel(Q, ConsumerTag, OkMsg, ActingUser, QStates) ->
+ rabbit_queue_type:cancel(Q, ConsumerTag,
+ OkMsg, ActingUser, QStates).
+
+-spec notify_decorators(amqqueue:amqqueue()) -> 'ok'.
+
+notify_decorators(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ delegate:invoke_no_result(QPid, {gen_server2, cast, [notify_decorators]}).
+
+notify_sent(QPid, ChPid) ->
+ rabbit_amqqueue_common:notify_sent(QPid, ChPid).
+
+notify_sent_queue_down(QPid) ->
+ rabbit_amqqueue_common:notify_sent_queue_down(QPid).
+
+-spec resume(pid(), pid()) -> 'ok'.
+
+resume(QPid, ChPid) -> delegate:invoke_no_result(QPid, {gen_server2, cast,
+ [{resume, ChPid}]}).
+
+internal_delete1(QueueName, OnlyDurable) ->
+ internal_delete1(QueueName, OnlyDurable, normal).
+
+internal_delete1(QueueName, OnlyDurable, Reason) ->
+ ok = mnesia:delete({rabbit_queue, QueueName}),
+ case Reason of
+ auto_delete ->
+ case mnesia:wread({rabbit_durable_queue, QueueName}) of
+ [] -> ok;
+ [_] -> ok = mnesia:delete({rabbit_durable_queue, QueueName})
+ end;
+ _ ->
+ mnesia:delete({rabbit_durable_queue, QueueName})
+ end,
+ %% we want to execute some things, as decided by rabbit_exchange,
+ %% after the transaction.
+ rabbit_binding:remove_for_destination(QueueName, OnlyDurable).
+
+-spec internal_delete(name(), rabbit_types:username()) -> 'ok'.
+
+internal_delete(QueueName, ActingUser) ->
+ internal_delete(QueueName, ActingUser, normal).
+
+internal_delete(QueueName, ActingUser, Reason) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () ->
+ case {mnesia:wread({rabbit_queue, QueueName}),
+ mnesia:wread({rabbit_durable_queue, QueueName})} of
+ {[], []} ->
+ rabbit_misc:const(ok);
+ _ ->
+ Deletions = internal_delete1(QueueName, false, Reason),
+ T = rabbit_binding:process_deletions(Deletions,
+ ?INTERNAL_USER),
+ fun() ->
+ ok = T(),
+ rabbit_core_metrics:queue_deleted(QueueName),
+ ok = rabbit_event:notify(queue_deleted,
+ [{name, QueueName},
+ {user_who_performed_action, ActingUser}])
+ end
+ end
+ end).
+
+-spec forget_all_durable(node()) -> 'ok'.
+
+forget_all_durable(Node) ->
+ %% Note that rabbit is not running, so we avoid e.g. the worker pool. That is
+ %% also why we don't invoke the return value of rabbit_binding:process_deletions/1.
+ {atomic, ok} =
+ mnesia:sync_transaction(
+ fun () ->
+ Qs = mnesia:match_object(rabbit_durable_queue,
+ amqqueue:pattern_match_all(), write),
+ [forget_node_for_queue(Node, Q) ||
+ Q <- Qs,
+ is_local_to_node(amqqueue:get_pid(Q), Node)],
+ ok
+ end),
+ ok.
+
+%% Try to promote a mirror while down - it should recover as a
+%% master. We try to take the oldest mirror here for best chance of
+%% recovery.
+forget_node_for_queue(_DeadNode, Q)
+ when ?amqqueue_is_quorum(Q) ->
+ ok;
+forget_node_for_queue(DeadNode, Q) ->
+ RS = amqqueue:get_recoverable_slaves(Q),
+ forget_node_for_queue(DeadNode, RS, Q).
+
+forget_node_for_queue(_DeadNode, [], Q) ->
+ %% No mirrors to recover from, queue is gone.
+ %% Don't process_deletions since that just calls callbacks and we
+ %% are not really up.
+ Name = amqqueue:get_name(Q),
+ internal_delete1(Name, true);
+
+%% Should not happen, but let's be conservative.
+forget_node_for_queue(DeadNode, [DeadNode | T], Q) ->
+ forget_node_for_queue(DeadNode, T, Q);
+
+forget_node_for_queue(DeadNode, [H|T], Q) when ?is_amqqueue(Q) ->
+ Type = amqqueue:get_type(Q),
+ case {node_permits_offline_promotion(H), Type} of
+ {false, _} -> forget_node_for_queue(DeadNode, T, Q);
+ {true, rabbit_classic_queue} ->
+ Q1 = amqqueue:set_pid(Q, rabbit_misc:node_to_fake_pid(H)),
+ ok = mnesia:write(rabbit_durable_queue, Q1, write);
+ {true, rabbit_quorum_queue} ->
+ ok
+ end.
+
+node_permits_offline_promotion(Node) ->
+ case node() of
+ Node -> not rabbit:is_running(); %% [1]
+ _ -> All = rabbit_mnesia:cluster_nodes(all),
+ Running = rabbit_nodes:all_running(),
+ lists:member(Node, All) andalso
+ not lists:member(Node, Running) %% [2]
+ end.
+%% [1] In this case if we are a real running node (i.e. rabbitmqctl
+%% has RPCed into us) then we cannot allow promotion. If on the other
+%% hand we *are* rabbitmqctl impersonating the node for offline
+%% node-forgetting then we can.
+%%
+%% [2] This is simpler; as long as it's down that's OK
+
+-spec run_backing_queue
+ (pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) ->
+ 'ok'.
+
+run_backing_queue(QPid, Mod, Fun) ->
+ gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}).
+
+-spec set_ram_duration_target(pid(), number() | 'infinity') -> 'ok'.
+
+set_ram_duration_target(QPid, Duration) ->
+ gen_server2:cast(QPid, {set_ram_duration_target, Duration}).
+
+-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'.
+
+set_maximum_since_use(QPid, Age) ->
+ gen_server2:cast(QPid, {set_maximum_since_use, Age}).
+
+-spec update_mirroring(pid()) -> 'ok'.
+
+update_mirroring(QPid) ->
+ ok = delegate:invoke_no_result(QPid, {gen_server2, cast, [update_mirroring]}).
+
+-spec sync_mirrors(amqqueue:amqqueue() | pid()) ->
+ 'ok' | rabbit_types:error('not_mirrored').
+
+sync_mirrors(Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ delegate:invoke(QPid, {gen_server2, call, [sync_mirrors, infinity]});
+sync_mirrors(QPid) ->
+ delegate:invoke(QPid, {gen_server2, call, [sync_mirrors, infinity]}).
+
+-spec cancel_sync_mirrors(amqqueue:amqqueue() | pid()) ->
+ 'ok' | {'ok', 'not_syncing'}.
+
+cancel_sync_mirrors(Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ delegate:invoke(QPid, {gen_server2, call, [cancel_sync_mirrors, infinity]});
+cancel_sync_mirrors(QPid) ->
+ delegate:invoke(QPid, {gen_server2, call, [cancel_sync_mirrors, infinity]}).
+
+-spec is_replicated(amqqueue:amqqueue()) -> boolean().
+
+is_replicated(Q) when ?amqqueue_is_quorum(Q) ->
+ true;
+is_replicated(Q) ->
+ rabbit_mirror_queue_misc:is_mirrored(Q).
+
+is_exclusive(Q) when ?amqqueue_exclusive_owner_is(Q, none) ->
+ false;
+is_exclusive(Q) when ?amqqueue_exclusive_owner_is_pid(Q) ->
+ true.
+
+is_not_exclusive(Q) ->
+ not is_exclusive(Q).
+
+is_dead_exclusive(Q) when ?amqqueue_exclusive_owner_is(Q, none) ->
+ false;
+is_dead_exclusive(Q) when ?amqqueue_exclusive_owner_is_pid(Q) ->
+ Pid = amqqueue:get_pid(Q),
+ not rabbit_mnesia:is_process_alive(Pid).
+
+-spec has_synchronised_mirrors_online(amqqueue:amqqueue()) -> boolean().
+has_synchronised_mirrors_online(Q) ->
+ %% a queue with all mirrors down would have no mirror pids.
+ %% We treat these as in sync intentionally to avoid false positives.
+ MirrorPids = amqqueue:get_sync_slave_pids(Q),
+ MirrorPids =/= [] andalso lists:any(fun rabbit_misc:is_process_alive/1, MirrorPids).
+
+-spec on_node_up(node()) -> 'ok'.
+
+on_node_up(Node) ->
+ ok = rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ Qs = mnesia:match_object(rabbit_queue,
+ amqqueue:pattern_match_all(), write),
+ [maybe_clear_recoverable_node(Node, Q) || Q <- Qs],
+ ok
+ end).
+
+maybe_clear_recoverable_node(Node, Q) ->
+ SPids = amqqueue:get_sync_slave_pids(Q),
+ RSs = amqqueue:get_recoverable_slaves(Q),
+ case lists:member(Node, RSs) of
+ true ->
+ %% There is a race between
+ %% rabbit_mirror_queue_slave:record_synchronised/1, called
+ %% by the incoming mirror node, and this function, called
+ %% by the master node. If this function is executed after
+ %% record_synchronised/1, the node is erroneously removed
+ %% from the recoverable mirrors list.
+ %%
+ %% We check whether the mirror node's queue PID is alive. If it
+ %% is, this function is running after record_synchronised/1, so
+ %% we don't touch the queue record: it is already correct.
+ DoClearNode =
+ case [SP || SP <- SPids, node(SP) =:= Node] of
+ [SPid] -> not rabbit_misc:is_process_alive(SPid);
+ _ -> true
+ end,
+ if
+ DoClearNode -> RSs1 = RSs -- [Node],
+ store_queue(
+ amqqueue:set_recoverable_slaves(Q, RSs1));
+ true -> ok
+ end;
+ false ->
+ ok
+ end.
+
+-spec on_node_down(node()) -> 'ok'.
+
+on_node_down(Node) ->
+ {QueueNames, QueueDeletions} = delete_queues_on_node_down(Node),
+ notify_queue_binding_deletions(QueueDeletions),
+ rabbit_core_metrics:queues_deleted(QueueNames),
+ notify_queues_deleted(QueueNames),
+ ok.
+
+delete_queues_on_node_down(Node) ->
+ lists:unzip(lists:flatten([
+ rabbit_misc:execute_mnesia_transaction(
+ fun () -> [{Queue, delete_queue(Queue)} || Queue <- Queues] end
+ ) || Queues <- partition_queues(queues_to_delete_when_node_down(Node))
+ ])).
+
+delete_queue(QueueName) ->
+ ok = mnesia:delete({rabbit_queue, QueueName}),
+ rabbit_binding:remove_transient_for_destination(QueueName).
+
+% If there are many queues and we delete them all in a single Mnesia transaction,
+% this can block all other Mnesia operations for a really long time.
+% In situations where a node wants to (re-)join a cluster,
+% Mnesia won't be able to sync on the new node until this operation finishes.
+% As a result, we want to have multiple Mnesia transactions so that other
+% operations can make progress in between these queue delete transactions.
+%
+% 10 queues per Mnesia transaction is an arbitrary number, but it seems to work OK with 50k queues per node.
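+% For example, partition_queues([Q1, ..., Q12]) returns
+% [[Q1, ..., Q10], [Q11, Q12]], i.e. chunks of at most 10 queue names each.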
+partition_queues([Q0,Q1,Q2,Q3,Q4,Q5,Q6,Q7,Q8,Q9 | T]) ->
+ [[Q0,Q1,Q2,Q3,Q4,Q5,Q6,Q7,Q8,Q9] | partition_queues(T)];
+partition_queues(T) ->
+ [T].
+
+queues_to_delete_when_node_down(NodeDown) ->
+ rabbit_misc:execute_mnesia_transaction(fun () ->
+ qlc:e(qlc:q([amqqueue:get_name(Q) ||
+ Q <- mnesia:table(rabbit_queue),
+ amqqueue:qnode(Q) == NodeDown andalso
+ not rabbit_mnesia:is_process_alive(amqqueue:get_pid(Q)) andalso
+ (not rabbit_amqqueue:is_replicated(Q) orelse
+ rabbit_amqqueue:is_dead_exclusive(Q))]
+ ))
+ end).
+
+notify_queue_binding_deletions(QueueDeletions) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun() ->
+ rabbit_binding:process_deletions(
+ lists:foldl(
+ fun rabbit_binding:combine_deletions/2,
+ rabbit_binding:new_deletions(),
+ QueueDeletions
+ ),
+ ?INTERNAL_USER
+ )
+ end
+ ).
+
+notify_queues_deleted(QueueDeletions) ->
+ lists:foreach(
+ fun(Queue) ->
+ ok = rabbit_event:notify(queue_deleted,
+ [{name, Queue},
+ {user, ?INTERNAL_USER}])
+ end,
+ QueueDeletions).
+
+-spec pseudo_queue(name(), pid()) -> amqqueue:amqqueue().
+
+pseudo_queue(QueueName, Pid) ->
+ pseudo_queue(QueueName, Pid, false).
+
+-spec pseudo_queue(name(), pid(), boolean()) -> amqqueue:amqqueue().
+
+pseudo_queue(#resource{kind = queue} = QueueName, Pid, Durable)
+ when is_pid(Pid) andalso
+ is_boolean(Durable) ->
+ amqqueue:new(QueueName,
+ Pid,
+ Durable,
+ false,
+ none, % Owner,
+ [],
+ undefined, % VHost,
+ #{user => undefined}, % ActingUser
+ rabbit_classic_queue % Type
+ ).
+
+-spec immutable(amqqueue:amqqueue()) -> amqqueue:amqqueue().
+
+immutable(Q) -> amqqueue:set_immutable(Q).
+
+-spec deliver([amqqueue:amqqueue()], rabbit_types:delivery()) -> 'ok'.
+
+deliver(Qs, Delivery) ->
+ _ = rabbit_queue_type:deliver(Qs, Delivery, stateless),
+ ok.
+
+get_quorum_nodes(Q) ->
+ case amqqueue:get_type_state(Q) of
+ #{nodes := Nodes} ->
+ Nodes;
+ _ ->
+ []
+ end.
diff --git a/deps/rabbit/src/rabbit_amqqueue_process.erl b/deps/rabbit/src/rabbit_amqqueue_process.erl
new file mode 100644
index 0000000000..abad3b5ad4
--- /dev/null
+++ b/deps/rabbit/src/rabbit_amqqueue_process.erl
@@ -0,0 +1,1849 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqqueue_process).
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("amqqueue.hrl").
+
+-behaviour(gen_server2).
+
+-define(SYNC_INTERVAL, 200). %% milliseconds
+-define(RAM_DURATION_UPDATE_INTERVAL, 5000).
+-define(CONSUMER_BIAS_RATIO, 2.0). %% i.e. consume 100% faster
+
+-export([info_keys/0]).
+
+-export([init_with_backing_queue_state/7]).
+
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, handle_pre_hibernate/1, prioritise_call/4,
+ prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
+-export([format/1]).
+-export([is_policy_applicable/2]).
+
+%% Queue's state
+-record(q, {
+ %% an #amqqueue record
+ q :: amqqueue:amqqueue(),
+ %% none | {exclusive consumer channel PID, consumer tag} | {single active consumer channel PID, consumer}
+ active_consumer,
+ %% Set to true if a queue has ever had a consumer.
+ %% This is used to determine when to delete auto-delete queues.
+ has_had_consumers,
+ %% backing queue module.
+ %% for mirrored queues, this will be rabbit_mirror_queue_master.
+ %% for non-priority and non-mirrored queues, rabbit_variable_queue.
+ %% see rabbit_backing_queue.
+ backing_queue,
+ %% backing queue state.
+ %% see rabbit_backing_queue, rabbit_variable_queue.
+ backing_queue_state,
+ %% consumers state, see rabbit_queue_consumers
+ consumers,
+ %% queue expiration value
+ expires,
+ %% timer used to periodically sync (flush) queue index
+ sync_timer_ref,
+ %% timer used to update ingress/egress rates and queue RAM duration target
+ rate_timer_ref,
+ %% timer used to clean up this queue due to TTL (on when unused)
+ expiry_timer_ref,
+ %% stats emission timer
+ stats_timer,
+ %% maps message IDs to {channel pid, MsgSeqNo}
+ %% pairs
+ msg_id_to_channel,
+ %% message TTL value
+ ttl,
+ %% timer used to delete expired messages
+ ttl_timer_ref,
+ ttl_timer_expiry,
+ %% Keeps track of channels that publish to this queue.
+ %% When channel process goes down, queues have to perform
+ %% certain cleanup.
+ senders,
+ %% dead letter exchange as a #resource record, if any
+ dlx,
+ dlx_routing_key,
+ %% max length in messages, if configured
+ max_length,
+ %% max length in bytes, if configured
+ max_bytes,
+ %% the action to perform when the queue would go over a length or size limit;
+ %% can be either drop-head (default), reject-publish or reject-publish-dlx
+ overflow,
+ %% when policies change, this version helps the queue
+ %% determine which previously scheduled/set-up state to ignore,
+ %% e.g. message expiration messages from timers that were set up
+ %% earlier and may or may not still be valid
+ args_policy_version,
+ %% used to discard outdated/superseded policy updates,
+ %% e.g. when policies are applied concurrently. See
+ %% https://github.com/rabbitmq/rabbitmq-server/issues/803 for one
+ %% example.
+ mirroring_policy_version = 0,
+ %% running | flow | idle
+ status,
+ %% true | false
+ single_active_consumer_on
+ }).
+
+%%----------------------------------------------------------------------------
+
+-define(STATISTICS_KEYS,
+ [messages_ready,
+ messages_unacknowledged,
+ messages,
+ reductions,
+ name,
+ policy,
+ operator_policy,
+ effective_policy_definition,
+ exclusive_consumer_pid,
+ exclusive_consumer_tag,
+ single_active_consumer_pid,
+ single_active_consumer_tag,
+ consumers,
+ consumer_utilisation,
+ memory,
+ slave_pids,
+ synchronised_slave_pids,
+ recoverable_slaves,
+ state,
+ garbage_collection
+ ]).
+
+-define(CREATION_EVENT_KEYS,
+ [name,
+ durable,
+ auto_delete,
+ arguments,
+ owner_pid,
+ exclusive,
+ user_who_performed_action
+ ]).
+
+-define(INFO_KEYS, [pid | ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [name]]).
+
+%%----------------------------------------------------------------------------
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS ++ rabbit_backing_queue:info_keys().
+statistics_keys() -> ?STATISTICS_KEYS ++ rabbit_backing_queue:info_keys().
+
+%%----------------------------------------------------------------------------
+
+init(Q) ->
+ process_flag(trap_exit, true),
+ ?store_proc_name(amqqueue:get_name(Q)),
+ {ok, init_state(amqqueue:set_pid(Q, self())), hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE},
+ ?MODULE}.
+
+init_state(Q) ->
+ SingleActiveConsumerOn = case rabbit_misc:table_lookup(amqqueue:get_arguments(Q), <<"x-single-active-consumer">>) of
+ {bool, true} -> true;
+ _ -> false
+ end,
+ State = #q{q = Q,
+ active_consumer = none,
+ has_had_consumers = false,
+ consumers = rabbit_queue_consumers:new(),
+ senders = pmon:new(delegate),
+ msg_id_to_channel = #{},
+ status = running,
+ args_policy_version = 0,
+ overflow = 'drop-head',
+ single_active_consumer_on = SingleActiveConsumerOn},
+ rabbit_event:init_stats_timer(State, #q.stats_timer).
+
+init_it(Recover, From, State = #q{q = Q})
+ when ?amqqueue_exclusive_owner_is(Q, none) ->
+ init_it2(Recover, From, State);
+
+%% You used to be able to declare an exclusive durable queue. Sadly we
+%% still need to tidy up after that case: there could be remnants of one
+%% left over from an upgrade. That is why we don't enforce
+%% Recover = new here.
+init_it(Recover, From, State = #q{q = Q0}) ->
+ Owner = amqqueue:get_exclusive_owner(Q0),
+ case rabbit_misc:is_process_alive(Owner) of
+ true -> erlang:monitor(process, Owner),
+ init_it2(Recover, From, State);
+ false -> #q{backing_queue = undefined,
+ backing_queue_state = undefined,
+ q = Q} = State,
+ send_reply(From, {owner_died, Q}),
+ BQ = backing_queue_module(Q),
+ {_, Terms} = recovery_status(Recover),
+ BQS = bq_init(BQ, Q, Terms),
+ %% Rely on terminate to delete the queue.
+ log_delete_exclusive(Owner, State),
+ {stop, {shutdown, missing_owner},
+ State#q{backing_queue = BQ, backing_queue_state = BQS}}
+ end.
+
+init_it2(Recover, From, State = #q{q = Q,
+ backing_queue = undefined,
+ backing_queue_state = undefined}) ->
+ {Barrier, TermsOrNew} = recovery_status(Recover),
+ case rabbit_amqqueue:internal_declare(Q, Recover /= new) of
+ {Res, Q1}
+ when ?is_amqqueue(Q1) andalso
+ (Res == created orelse Res == existing) ->
+ case matches(Recover, Q, Q1) of
+ true ->
+ ok = file_handle_cache:register_callback(
+ rabbit_amqqueue, set_maximum_since_use, [self()]),
+ ok = rabbit_memory_monitor:register(
+ self(), {rabbit_amqqueue,
+ set_ram_duration_target, [self()]}),
+ BQ = backing_queue_module(Q1),
+ BQS = bq_init(BQ, Q, TermsOrNew),
+ send_reply(From, {new, Q}),
+ recovery_barrier(Barrier),
+ State1 = process_args_policy(
+ State#q{backing_queue = BQ,
+ backing_queue_state = BQS}),
+ notify_decorators(startup, State),
+ rabbit_event:notify(queue_created,
+ infos(?CREATION_EVENT_KEYS, State1)),
+ rabbit_event:if_enabled(State1, #q.stats_timer,
+ fun() -> emit_stats(State1) end),
+ noreply(State1);
+ false ->
+ {stop, normal, {existing, Q1}, State}
+ end;
+ Err ->
+ {stop, normal, Err, State}
+ end.
+
+recovery_status(new) -> {no_barrier, new};
+recovery_status({Recover, Terms}) -> {Recover, Terms}.
+
+send_reply(none, _Q) -> ok;
+send_reply(From, Q) -> gen_server2:reply(From, Q).
+
+matches(new, Q1, Q2) ->
+ %% i.e. not policy
+ amqqueue:get_name(Q1) =:= amqqueue:get_name(Q2) andalso
+ amqqueue:is_durable(Q1) =:= amqqueue:is_durable(Q2) andalso
+ amqqueue:is_auto_delete(Q1) =:= amqqueue:is_auto_delete(Q2) andalso
+ amqqueue:get_exclusive_owner(Q1) =:= amqqueue:get_exclusive_owner(Q2) andalso
+ amqqueue:get_arguments(Q1) =:= amqqueue:get_arguments(Q2) andalso
+ amqqueue:get_pid(Q1) =:= amqqueue:get_pid(Q2) andalso
+ amqqueue:get_slave_pids(Q1) =:= amqqueue:get_slave_pids(Q2);
+%% FIXME: Should v1 vs. v2 of the same record match?
+matches(_, Q, Q) -> true;
+matches(_, _Q, _Q1) -> false.
+
+recovery_barrier(no_barrier) ->
+ ok;
+recovery_barrier(BarrierPid) ->
+ MRef = erlang:monitor(process, BarrierPid),
+ receive
+ {BarrierPid, go} -> erlang:demonitor(MRef, [flush]);
+ {'DOWN', MRef, process, _, _} -> ok
+ end.
+
+-spec init_with_backing_queue_state
+ (amqqueue:amqqueue(), atom(), tuple(), any(),
+ [rabbit_types:delivery()], pmon:pmon(), map()) ->
+ #q{}.
+
+init_with_backing_queue_state(Q, BQ, BQS,
+ RateTRef, Deliveries, Senders, MTC) ->
+ Owner = amqqueue:get_exclusive_owner(Q),
+ case Owner of
+ none -> ok;
+ _ -> erlang:monitor(process, Owner)
+ end,
+ State = init_state(Q),
+ State1 = State#q{backing_queue = BQ,
+ backing_queue_state = BQS,
+ rate_timer_ref = RateTRef,
+ senders = Senders,
+ msg_id_to_channel = MTC},
+ State2 = process_args_policy(State1),
+ State3 = lists:foldl(fun (Delivery, StateN) ->
+ maybe_deliver_or_enqueue(Delivery, true, StateN)
+ end, State2, Deliveries),
+ notify_decorators(startup, State3),
+ State3.
+
+terminate(shutdown = R, State = #q{backing_queue = BQ, q = Q0}) ->
+ QName = amqqueue:get_name(Q0),
+ rabbit_core_metrics:queue_deleted(qname(State)),
+ terminate_shutdown(
+ fun (BQS) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ [Q] = mnesia:read({rabbit_queue, QName}),
+ Q2 = amqqueue:set_state(Q, stopped),
+ %% amqqueue migration:
+ %% The amqqueue was read from this transaction, no need
+ %% to handle migration.
+ rabbit_amqqueue:store_queue(Q2)
+ end),
+ BQ:terminate(R, BQS)
+ end, State);
+terminate({shutdown, missing_owner} = Reason, State) ->
+ %% if the owner was missing then there will be no queue, so don't emit stats
+ terminate_shutdown(terminate_delete(false, Reason, State), State);
+terminate({shutdown, _} = R, State = #q{backing_queue = BQ}) ->
+ rabbit_core_metrics:queue_deleted(qname(State)),
+ terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
+terminate(normal, State = #q{status = {terminated_by, auto_delete}}) ->
+ %% auto_delete case
+ %% To increase performance we want to avoid a mnesia_sync:sync call
+ %% after every transaction, as we could be deleting thousands of
+ %% queues simultaneously. An optimisation introduced by server#1513
+ %% needs to be reverted in this case, so we avoid guarding the delete
+ %% operation on `rabbit_durable_queue`.
+ terminate_shutdown(terminate_delete(true, auto_delete, State), State);
+terminate(normal, State) -> %% delete case
+ terminate_shutdown(terminate_delete(true, normal, State), State);
+%% If we crashed don't try to clean up the BQS, probably best to leave it.
+terminate(_Reason, State = #q{q = Q}) ->
+ terminate_shutdown(fun (BQS) ->
+ Q2 = amqqueue:set_state(Q, crashed),
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ ?try_mnesia_tx_or_upgrade_amqqueue_and_retry(
+ rabbit_amqqueue:store_queue(Q2),
+ begin
+ Q3 = amqqueue:upgrade(Q2),
+ rabbit_amqqueue:store_queue(Q3)
+ end)
+ end),
+ BQS
+ end, State).
+
+terminate_delete(EmitStats, Reason0,
+ State = #q{q = Q,
+ backing_queue = BQ,
+ status = Status}) ->
+ QName = amqqueue:get_name(Q),
+ ActingUser = terminated_by(Status),
+ fun (BQS) ->
+ Reason = case Reason0 of
+ auto_delete -> normal;
+ Any -> Any
+ end,
+ BQS1 = BQ:delete_and_terminate(Reason, BQS),
+ if EmitStats -> rabbit_event:if_enabled(State, #q.stats_timer,
+ fun() -> emit_stats(State) end);
+ true -> ok
+ end,
+ %% This try-catch block transforms throws to errors since throws are not
+ %% logged.
+ try
+ %% don't care if the internal delete doesn't return 'ok'.
+ rabbit_amqqueue:internal_delete(QName, ActingUser, Reason0)
+ catch
+ {error, ReasonE} -> error(ReasonE)
+ end,
+ BQS1
+ end.
+
+terminated_by({terminated_by, auto_delete}) ->
+ ?INTERNAL_USER;
+terminated_by({terminated_by, ActingUser}) ->
+ ActingUser;
+terminated_by(_) ->
+ ?INTERNAL_USER.
+
+terminate_shutdown(Fun, #q{status = Status} = State) ->
+ ActingUser = terminated_by(Status),
+ State1 = #q{backing_queue_state = BQS, consumers = Consumers} =
+ lists:foldl(fun (F, S) -> F(S) end, State,
+ [fun stop_sync_timer/1,
+ fun stop_rate_timer/1,
+ fun stop_expiry_timer/1,
+ fun stop_ttl_timer/1]),
+ case BQS of
+ undefined -> State1;
+ _ -> ok = rabbit_memory_monitor:deregister(self()),
+ QName = qname(State),
+ notify_decorators(shutdown, State),
+ [emit_consumer_deleted(Ch, CTag, QName, ActingUser) ||
+ {Ch, CTag, _, _, _, _, _, _} <-
+ rabbit_queue_consumers:all(Consumers)],
+ State1#q{backing_queue_state = Fun(BQS)}
+ end.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+maybe_notify_decorators(false, State) -> State;
+maybe_notify_decorators(true, State) -> notify_decorators(State), State.
+
+notify_decorators(Event, State) -> decorator_callback(qname(State), Event, []).
+
+notify_decorators(State = #q{consumers = Consumers,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ P = rabbit_queue_consumers:max_active_priority(Consumers),
+ decorator_callback(qname(State), consumer_state_changed,
+ [P, BQ:is_empty(BQS)]).
+
+decorator_callback(QName, F, A) ->
+ %% Look up again in case policy and hence decorators have changed
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ Ds = amqqueue:get_decorators(Q),
+ [ok = apply(M, F, [Q|A]) || M <- rabbit_queue_decorator:select(Ds)];
+ {error, not_found} ->
+ ok
+ end.
+
+bq_init(BQ, Q, Recover) ->
+ Self = self(),
+ BQ:init(Q, Recover,
+ fun (Mod, Fun) ->
+ rabbit_amqqueue:run_backing_queue(Self, Mod, Fun)
+ end).
+
+process_args_policy(State = #q{q = Q,
+ args_policy_version = N}) ->
+ ArgsTable =
+ [{<<"expires">>, fun res_min/2, fun init_exp/2},
+ {<<"dead-letter-exchange">>, fun res_arg/2, fun init_dlx/2},
+ {<<"dead-letter-routing-key">>, fun res_arg/2, fun init_dlx_rkey/2},
+ {<<"message-ttl">>, fun res_min/2, fun init_ttl/2},
+ {<<"max-length">>, fun res_min/2, fun init_max_length/2},
+ {<<"max-length-bytes">>, fun res_min/2, fun init_max_bytes/2},
+ {<<"overflow">>, fun res_arg/2, fun init_overflow/2},
+ {<<"queue-mode">>, fun res_arg/2, fun init_queue_mode/2}],
+ drop_expired_msgs(
+ lists:foldl(fun({Name, Resolve, Fun}, StateN) ->
+ Fun(rabbit_queue_type_util:args_policy_lookup(Name, Resolve, Q), StateN)
+ end, State#q{args_policy_version = N + 1}, ArgsTable)).
+
+res_arg(_PolVal, ArgVal) -> ArgVal.
+res_min(PolVal, ArgVal) -> erlang:min(PolVal, ArgVal).
+
+%% In both these we init with the undefined variant first to stop any
+%% existing timer, then start a new one which may fire after a
+%% different time.
+init_exp(undefined, State) -> stop_expiry_timer(State#q{expires = undefined});
+init_exp(Expires, State) -> State1 = init_exp(undefined, State),
+ ensure_expiry_timer(State1#q{expires = Expires}).
+
+init_ttl(undefined, State) -> stop_ttl_timer(State#q{ttl = undefined});
+init_ttl(TTL, State) -> (init_ttl(undefined, State))#q{ttl = TTL}.
+
+init_dlx(undefined, State) ->
+ State#q{dlx = undefined};
+init_dlx(DLX, State = #q{q = Q}) ->
+ QName = amqqueue:get_name(Q),
+ State#q{dlx = rabbit_misc:r(QName, exchange, DLX)}.
+
+init_dlx_rkey(RoutingKey, State) -> State#q{dlx_routing_key = RoutingKey}.
+
+init_max_length(MaxLen, State) ->
+ {_Dropped, State1} = maybe_drop_head(State#q{max_length = MaxLen}),
+ State1.
+
+init_max_bytes(MaxBytes, State) ->
+ {_Dropped, State1} = maybe_drop_head(State#q{max_bytes = MaxBytes}),
+ State1.
+
+%% Reset overflow to default 'drop-head' value if it's undefined.
+init_overflow(undefined, #q{overflow = 'drop-head'} = State) ->
+ State;
+init_overflow(undefined, State) ->
+ {_Dropped, State1} = maybe_drop_head(State#q{overflow = 'drop-head'}),
+ State1;
+init_overflow(Overflow, State) ->
+ OverflowVal = binary_to_existing_atom(Overflow, utf8),
+ case OverflowVal of
+ 'drop-head' ->
+ {_Dropped, State1} = maybe_drop_head(State#q{overflow = OverflowVal}),
+ State1;
+ _ ->
+ State#q{overflow = OverflowVal}
+ end.
+
+init_queue_mode(undefined, State) ->
+ State;
+init_queue_mode(Mode, State = #q {backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQS1 = BQ:set_queue_mode(binary_to_existing_atom(Mode, utf8), BQS),
+ State#q{backing_queue_state = BQS1}.
+
+reply(Reply, NewState) ->
+ {NewState1, Timeout} = next_state(NewState),
+ {reply, Reply, ensure_stats_timer(ensure_rate_timer(NewState1)), Timeout}.
+
+noreply(NewState) ->
+ {NewState1, Timeout} = next_state(NewState),
+ {noreply, ensure_stats_timer(ensure_rate_timer(NewState1)), Timeout}.
+
+next_state(State = #q{q = Q,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_to_channel = MTC}) ->
+ assert_invariant(State),
+ {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
+ MTC1 = confirm_messages(MsgIds, MTC, amqqueue:get_name(Q)),
+ State1 = State#q{backing_queue_state = BQS1, msg_id_to_channel = MTC1},
+ case BQ:needs_timeout(BQS1) of
+ false -> {stop_sync_timer(State1), hibernate };
+ idle -> {stop_sync_timer(State1), ?SYNC_INTERVAL};
+ timed -> {ensure_sync_timer(State1), 0 }
+ end.
+
+backing_queue_module(Q) ->
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> {ok, BQM} = application:get_env(backing_queue_module),
+ BQM;
+ true -> rabbit_mirror_queue_master
+ end.
+
+ensure_sync_timer(State) ->
+ rabbit_misc:ensure_timer(State, #q.sync_timer_ref,
+ ?SYNC_INTERVAL, sync_timeout).
+
+stop_sync_timer(State) -> rabbit_misc:stop_timer(State, #q.sync_timer_ref).
+
+ensure_rate_timer(State) ->
+ rabbit_misc:ensure_timer(State, #q.rate_timer_ref,
+ ?RAM_DURATION_UPDATE_INTERVAL,
+ update_ram_duration).
+
+stop_rate_timer(State) -> rabbit_misc:stop_timer(State, #q.rate_timer_ref).
+
+%% We wish to expire only when there are no consumers *and* the expiry
+%% hasn't been refreshed (by queue.declare or basic.get) for the
+%% configured period.
+ensure_expiry_timer(State = #q{expires = undefined}) ->
+ State;
+ensure_expiry_timer(State = #q{expires = Expires,
+ args_policy_version = Version}) ->
+ case is_unused(State) of
+ true -> NewState = stop_expiry_timer(State),
+ rabbit_misc:ensure_timer(NewState, #q.expiry_timer_ref,
+ Expires, {maybe_expire, Version});
+ false -> State
+ end.
+
+stop_expiry_timer(State) -> rabbit_misc:stop_timer(State, #q.expiry_timer_ref).
+
+ensure_ttl_timer(undefined, State) ->
+ State;
+ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref = undefined,
+ args_policy_version = Version}) ->
+ After = (case Expiry - os:system_time(micro_seconds) of
+ V when V > 0 -> V + 999; %% always fire later
+ _ -> 0
+ end) div 1000,
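+    %% e.g. (illustrative values) an Expiry 1500500 microseconds in the
+    %% future gives After = (1500500 + 999) div 1000 = 1501 ms, so the
+    %% timer fires just after the head message has actually expired.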
+ TRef = rabbit_misc:send_after(After, self(), {drop_expired, Version}),
+ State#q{ttl_timer_ref = TRef, ttl_timer_expiry = Expiry};
+ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref = TRef,
+ ttl_timer_expiry = TExpiry})
+ when Expiry + 1000 < TExpiry ->
+ rabbit_misc:cancel_timer(TRef),
+ ensure_ttl_timer(Expiry, State#q{ttl_timer_ref = undefined});
+ensure_ttl_timer(_Expiry, State) ->
+ State.
+
+stop_ttl_timer(State) -> rabbit_misc:stop_timer(State, #q.ttl_timer_ref).
+
+ensure_stats_timer(State) ->
+ rabbit_event:ensure_stats_timer(State, #q.stats_timer, emit_stats).
+
+assert_invariant(#q{single_active_consumer_on = true}) ->
+    %% with a single active consumer the queue may hold messages while
+    %% other consumers are available but waiting, so the invariant below
+    %% does not apply
+    ok;
+assert_invariant(State = #q{consumers = Consumers, single_active_consumer_on = false}) ->
+ true = (rabbit_queue_consumers:inactive(Consumers) orelse is_empty(State)).
+
+is_empty(#q{backing_queue = BQ, backing_queue_state = BQS}) -> BQ:is_empty(BQS).
+
+maybe_send_drained(WasEmpty, State) ->
+ case (not WasEmpty) andalso is_empty(State) of
+ true -> notify_decorators(State),
+ rabbit_queue_consumers:send_drained();
+ false -> ok
+ end,
+ State.
+
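+%% Group pending confirms by sender channel pid and send one confirm per
+%% channel; the returned map has the confirmed message ids removed.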
+confirm_messages([], MTC, _QName) ->
+ MTC;
+confirm_messages(MsgIds, MTC, QName) ->
+ {CMs, MTC1} =
+ lists:foldl(
+ fun(MsgId, {CMs, MTC0}) ->
+ case maps:get(MsgId, MTC0, none) of
+ none ->
+ {CMs, MTC0};
+ {SenderPid, MsgSeqNo} ->
+ {maps:update_with(SenderPid,
+ fun(MsgSeqNos) ->
+ [MsgSeqNo | MsgSeqNos]
+ end,
+ [MsgSeqNo],
+ CMs),
+ maps:remove(MsgId, MTC0)}
+
+ end
+ end, {#{}, MTC}, MsgIds),
+ maps:fold(
+ fun(Pid, MsgSeqNos, _) ->
+ confirm_to_sender(Pid, QName, MsgSeqNos)
+ end,
+ ok,
+ CMs),
+ MTC1.
+
+send_or_record_confirm(#delivery{confirm = false}, State) ->
+ {never, State};
+send_or_record_confirm(#delivery{confirm = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo,
+ message = #basic_message {
+ is_persistent = true,
+ id = MsgId}},
+ State = #q{q = Q,
+ msg_id_to_channel = MTC})
+ when ?amqqueue_is_durable(Q) ->
+ MTC1 = maps:put(MsgId, {SenderPid, MsgSeqNo}, MTC),
+ {eventually, State#q{msg_id_to_channel = MTC1}};
+send_or_record_confirm(#delivery{confirm = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo},
+ #q{q = Q} = State) ->
+ confirm_to_sender(SenderPid, amqqueue:get_name(Q), [MsgSeqNo]),
+ {immediately, State}.
+
+%% This feature was used by `rabbit_amqqueue_process` and
+%% `rabbit_mirror_queue_slave` up to and including RabbitMQ 3.7.x. It is
+%% unused in 3.8.x and thus deprecated. We keep it to support in-place
+%% upgrades to 3.8.x (i.e. mixed-version clusters), but it is a no-op
+%% starting with that version.
+send_mandatory(#delivery{mandatory = false}) ->
+ ok;
+send_mandatory(#delivery{mandatory = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo}) ->
+ gen_server2:cast(SenderPid, {mandatory_received, MsgSeqNo}).
+
+discard(#delivery{confirm = Confirm,
+ sender = SenderPid,
+ flow = Flow,
+ message = #basic_message{id = MsgId}}, BQ, BQS, MTC, QName) ->
+ MTC1 = case Confirm of
+ true -> confirm_messages([MsgId], MTC, QName);
+ false -> MTC
+ end,
+ BQS1 = BQ:discard(MsgId, SenderPid, Flow, BQS),
+ {BQS1, MTC1}.
+
+run_message_queue(State) -> run_message_queue(false, State).
+
+run_message_queue(ActiveConsumersChanged, State) ->
+ case is_empty(State) of
+ true -> maybe_notify_decorators(ActiveConsumersChanged, State);
+ false -> case rabbit_queue_consumers:deliver(
+ fun(AckRequired) -> fetch(AckRequired, State) end,
+ qname(State), State#q.consumers,
+ State#q.single_active_consumer_on, State#q.active_consumer) of
+ {delivered, ActiveConsumersChanged1, State1, Consumers} ->
+ run_message_queue(
+ ActiveConsumersChanged or ActiveConsumersChanged1,
+ State1#q{consumers = Consumers});
+ {undelivered, ActiveConsumersChanged1, Consumers} ->
+ maybe_notify_decorators(
+ ActiveConsumersChanged or ActiveConsumersChanged1,
+ State#q{consumers = Consumers})
+ end
+ end.
+
+attempt_delivery(Delivery = #delivery{sender = SenderPid,
+ flow = Flow,
+ message = Message},
+ Props, Delivered, State = #q{q = Q,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_to_channel = MTC}) ->
+ case rabbit_queue_consumers:deliver(
+ fun (true) -> true = BQ:is_empty(BQS),
+ {AckTag, BQS1} =
+ BQ:publish_delivered(
+ Message, Props, SenderPid, Flow, BQS),
+ {{Message, Delivered, AckTag}, {BQS1, MTC}};
+ (false) -> {{Message, Delivered, undefined},
+ discard(Delivery, BQ, BQS, MTC, amqqueue:get_name(Q))}
+ end, qname(State), State#q.consumers, State#q.single_active_consumer_on, State#q.active_consumer) of
+ {delivered, ActiveConsumersChanged, {BQS1, MTC1}, Consumers} ->
+ {delivered, maybe_notify_decorators(
+ ActiveConsumersChanged,
+ State#q{backing_queue_state = BQS1,
+ msg_id_to_channel = MTC1,
+ consumers = Consumers})};
+ {undelivered, ActiveConsumersChanged, Consumers} ->
+ {undelivered, maybe_notify_decorators(
+ ActiveConsumersChanged,
+ State#q{consumers = Consumers})}
+ end.
+
+maybe_deliver_or_enqueue(Delivery = #delivery{message = Message},
+ Delivered,
+ State = #q{overflow = Overflow,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ dlx = DLX,
+ dlx_routing_key = RK}) ->
+ send_mandatory(Delivery), %% must do this before confirms
+ case {will_overflow(Delivery, State), Overflow} of
+ {true, 'reject-publish'} ->
+ %% Drop publish and nack to publisher
+ send_reject_publish(Delivery, Delivered, State);
+ {true, 'reject-publish-dlx'} ->
+ %% Publish to DLX
+ with_dlx(
+ DLX,
+ fun (X) ->
+ QName = qname(State),
+ rabbit_dead_letter:publish(Message, maxlen, X, RK, QName)
+ end,
+ fun () -> ok end),
+ %% Drop publish and nack to publisher
+ send_reject_publish(Delivery, Delivered, State);
+ _ ->
+ {IsDuplicate, BQS1} = BQ:is_duplicate(Message, BQS),
+ State1 = State#q{backing_queue_state = BQS1},
+ case IsDuplicate of
+ true -> State1;
+ {true, drop} -> State1;
+ %% Drop publish and nack to publisher
+ {true, reject} ->
+ send_reject_publish(Delivery, Delivered, State1);
+ %% Enqueue and maybe drop head later
+ false ->
+ deliver_or_enqueue(Delivery, Delivered, State1)
+ end
+ end.
+
+deliver_or_enqueue(Delivery = #delivery{message = Message,
+ sender = SenderPid,
+ flow = Flow},
+ Delivered,
+ State = #q{q = Q, backing_queue = BQ}) ->
+ {Confirm, State1} = send_or_record_confirm(Delivery, State),
+ Props = message_properties(Message, Confirm, State1),
+ case attempt_delivery(Delivery, Props, Delivered, State1) of
+ {delivered, State2} ->
+ State2;
+ %% The next one is an optimisation
+ {undelivered, State2 = #q{ttl = 0, dlx = undefined,
+ backing_queue_state = BQS,
+ msg_id_to_channel = MTC}} ->
+ {BQS1, MTC1} = discard(Delivery, BQ, BQS, MTC, amqqueue:get_name(Q)),
+ State2#q{backing_queue_state = BQS1, msg_id_to_channel = MTC1};
+ {undelivered, State2 = #q{backing_queue_state = BQS}} ->
+
+ BQS1 = BQ:publish(Message, Props, Delivered, SenderPid, Flow, BQS),
+ {Dropped, State3 = #q{backing_queue_state = BQS2}} =
+ maybe_drop_head(State2#q{backing_queue_state = BQS1}),
+ QLen = BQ:len(BQS2),
+ %% optimisation: it would be perfectly safe to always
+ %% invoke drop_expired_msgs here, but that is expensive so
+ %% we only do that if a new message that might have an
+ %% expiry ends up at the head of the queue. If the head
+ %% remains unchanged, or if the newly published message
+ %% has no expiry and becomes the head of the queue then
+ %% the call is unnecessary.
+ case {Dropped, QLen =:= 1, Props#message_properties.expiry} of
+ {false, false, _} -> State3;
+ {true, true, undefined} -> State3;
+ {_, _, _} -> drop_expired_msgs(State3)
+ end
+ end.
+
+maybe_drop_head(State = #q{max_length = undefined,
+ max_bytes = undefined}) ->
+ {false, State};
+maybe_drop_head(State = #q{overflow = 'reject-publish'}) ->
+ {false, State};
+maybe_drop_head(State = #q{overflow = 'reject-publish-dlx'}) ->
+ {false, State};
+maybe_drop_head(State = #q{overflow = 'drop-head'}) ->
+ maybe_drop_head(false, State).
+
+maybe_drop_head(AlreadyDropped, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ case over_max_length(State) of
+ true ->
+ maybe_drop_head(true,
+ with_dlx(
+ State#q.dlx,
+ fun (X) -> dead_letter_maxlen_msg(X, State) end,
+ fun () ->
+ {_, BQS1} = BQ:drop(false, BQS),
+ State#q{backing_queue_state = BQS1}
+ end));
+ false ->
+ {AlreadyDropped, State}
+ end.
+
+send_reject_publish(#delivery{confirm = true,
+ sender = SenderPid,
+ flow = Flow,
+ msg_seq_no = MsgSeqNo,
+ message = #basic_message{id = MsgId}},
+ _Delivered,
+ State = #q{ q = Q,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_to_channel = MTC}) ->
+ ok = rabbit_classic_queue:send_rejection(SenderPid,
+ amqqueue:get_name(Q), MsgSeqNo),
+
+ MTC1 = maps:remove(MsgId, MTC),
+ BQS1 = BQ:discard(MsgId, SenderPid, Flow, BQS),
+ State#q{ backing_queue_state = BQS1, msg_id_to_channel = MTC1 };
+send_reject_publish(#delivery{confirm = false},
+ _Delivered, State) ->
+ State.
+
+will_overflow(_, #q{max_length = undefined,
+ max_bytes = undefined}) -> false;
+will_overflow(#delivery{message = Message},
+ #q{max_length = MaxLen,
+ max_bytes = MaxBytes,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ ExpectedQueueLength = BQ:len(BQS) + 1,
+
+ #basic_message{content = #content{payload_fragments_rev = PFR}} = Message,
+ MessageSize = iolist_size(PFR),
+ ExpectedQueueSizeBytes = BQ:info(message_bytes_ready, BQS) + MessageSize,
+
+ ExpectedQueueLength > MaxLen orelse ExpectedQueueSizeBytes > MaxBytes.
+
+over_max_length(#q{max_length = MaxLen,
+ max_bytes = MaxBytes,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQ:len(BQS) > MaxLen orelse BQ:info(message_bytes_ready, BQS) > MaxBytes.
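+%% Note: when only one of max_length/max_bytes is set, the comparison
+%% against the other (the atom 'undefined') is always false because
+%% numbers sort before atoms in Erlang term ordering.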
+
+requeue_and_run(AckTags, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ WasEmpty = BQ:is_empty(BQS),
+ {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ {_Dropped, State1} = maybe_drop_head(State#q{backing_queue_state = BQS1}),
+ run_message_queue(maybe_send_drained(WasEmpty, drop_expired_msgs(State1))).
+
+fetch(AckRequired, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {Result, BQS1} = BQ:fetch(AckRequired, BQS),
+ State1 = drop_expired_msgs(State#q{backing_queue_state = BQS1}),
+ {Result, maybe_send_drained(Result =:= empty, State1)}.
+
+ack(AckTags, ChPid, State) ->
+ subtract_acks(ChPid, AckTags, State,
+ fun (State1 = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {_Guids, BQS1} = BQ:ack(AckTags, BQS),
+ State1#q{backing_queue_state = BQS1}
+ end).
+
+requeue(AckTags, ChPid, State) ->
+ subtract_acks(ChPid, AckTags, State,
+ fun (State1) -> requeue_and_run(AckTags, State1) end).
+
+possibly_unblock(Update, ChPid, State = #q{consumers = Consumers}) ->
+ case rabbit_queue_consumers:possibly_unblock(Update, ChPid, Consumers) of
+ unchanged -> State;
+ {unblocked, Consumers1} -> State1 = State#q{consumers = Consumers1},
+ run_message_queue(true, State1)
+ end.
+
+should_auto_delete(#q{q = Q})
+ when not ?amqqueue_is_auto_delete(Q) -> false;
+should_auto_delete(#q{has_had_consumers = false}) -> false;
+should_auto_delete(State) -> is_unused(State).
+
+handle_ch_down(DownPid, State = #q{consumers = Consumers,
+ active_consumer = Holder,
+ single_active_consumer_on = SingleActiveConsumerOn,
+ senders = Senders}) ->
+ State1 = State#q{senders = case pmon:is_monitored(DownPid, Senders) of
+ false ->
+ Senders;
+ true ->
+ %% A rabbit_channel process died. Here credit_flow will take care
+ %% of cleaning up the rabbit_amqqueue_process process dictionary
+ %% with regards to the credit we were tracking for the channel
+ %% process. See handle_cast({deliver, Deliver}, State) in this
+ %% module. In that cast function we process deliveries from the
+                               %% channel, calling credit_flow:ack/1 for each of them. Acking
+                               %% messages increments a counter so we know when we need to send
+                               %% MoreCreditAfter. Since the process died, the credit_flow
+                               %% module will clean that up for us.
+ credit_flow:peer_down(DownPid),
+ pmon:demonitor(DownPid, Senders)
+ end},
+ case rabbit_queue_consumers:erase_ch(DownPid, Consumers) of
+ not_found ->
+ {ok, State1};
+ {ChAckTags, ChCTags, Consumers1} ->
+ QName = qname(State1),
+ [emit_consumer_deleted(DownPid, CTag, QName, ?INTERNAL_USER) || CTag <- ChCTags],
+ Holder1 = new_single_active_consumer_after_channel_down(DownPid, Holder, SingleActiveConsumerOn, Consumers1),
+ State2 = State1#q{consumers = Consumers1,
+ active_consumer = Holder1},
+ maybe_notify_consumer_updated(State2, Holder, Holder1),
+ notify_decorators(State2),
+ case should_auto_delete(State2) of
+ true ->
+ log_auto_delete(
+ io_lib:format(
+ "because all of its consumers (~p) were on a channel that was closed",
+ [length(ChCTags)]),
+ State),
+ {stop, State2};
+ false -> {ok, requeue_and_run(ChAckTags,
+ ensure_expiry_timer(State2))}
+ end
+ end.
+
+new_single_active_consumer_after_channel_down(DownChPid, CurrentSingleActiveConsumer, _SingleActiveConsumerIsOn = true, Consumers) ->
+ case CurrentSingleActiveConsumer of
+ {DownChPid, _} ->
+ % the single active consumer is on the down channel, we have to replace it
+ case rabbit_queue_consumers:get_consumer(Consumers) of
+ undefined -> none;
+ Consumer -> Consumer
+ end;
+ _ ->
+ CurrentSingleActiveConsumer
+ end;
+new_single_active_consumer_after_channel_down(DownChPid, CurrentSingleActiveConsumer, _SingleActiveConsumerIsOn = false, _Consumers) ->
+ case CurrentSingleActiveConsumer of
+ {DownChPid, _} -> none;
+ Other -> Other
+ end.
+
+check_exclusive_access({_ChPid, _ConsumerTag}, _ExclusiveConsume, _State) ->
+ in_use;
+check_exclusive_access(none, false, _State) ->
+ ok;
+check_exclusive_access(none, true, State) ->
+ case is_unused(State) of
+ true -> ok;
+ false -> in_use
+ end.
+
+is_unused(_State) -> rabbit_queue_consumers:count() == 0.
+
+maybe_send_reply(_ChPid, undefined) -> ok;
+maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg).
+
+qname(#q{q = Q}) -> amqqueue:get_name(Q).
+
+backing_queue_timeout(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ State#q{backing_queue_state = BQ:timeout(BQS)}.
+
+subtract_acks(ChPid, AckTags, State = #q{consumers = Consumers}, Fun) ->
+ case rabbit_queue_consumers:subtract_acks(ChPid, AckTags, Consumers) of
+ not_found -> State;
+ unchanged -> Fun(State);
+ {unblocked, Consumers1} -> State1 = State#q{consumers = Consumers1},
+ run_message_queue(true, Fun(State1))
+ end.
+
+message_properties(Message = #basic_message{content = Content},
+ Confirm, #q{ttl = TTL}) ->
+ #content{payload_fragments_rev = PFR} = Content,
+ #message_properties{expiry = calculate_msg_expiry(Message, TTL),
+ needs_confirming = Confirm == eventually,
+ size = iolist_size(PFR)}.
+
+calculate_msg_expiry(#basic_message{content = Content}, TTL) ->
+ #content{properties = Props} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ %% We assert that the expiration must be valid - we check in the channel.
+ {ok, MsgTTL} = rabbit_basic:parse_expiration(Props),
+ case lists:min([TTL, MsgTTL]) of
+ undefined -> undefined;
+ T -> os:system_time(micro_seconds) + T * 1000
+ end.
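+%% Note: numbers sort before atoms in Erlang term ordering, so the
+%% lists:min/1 call above returns the numeric TTL when only one of the
+%% two is set, and 'undefined' only when both are undefined.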
+
+%% Logically this function should invoke maybe_send_drained/2.
+%% However, that is expensive. Since some frequent callers of
+%% drop_expired_msgs/1, in particular deliver_or_enqueue/3, cannot
+%% possibly cause the queue to become empty, we push the
+%% responsibility to the callers. So be cautious when adding new ones.
+drop_expired_msgs(State) ->
+ case is_empty(State) of
+ true -> State;
+ false -> drop_expired_msgs(os:system_time(micro_seconds),
+ State)
+ end.
+
+drop_expired_msgs(Now, State = #q{backing_queue_state = BQS,
+ backing_queue = BQ }) ->
+ ExpirePred = fun (#message_properties{expiry = Exp}) -> Now >= Exp end,
+ {Props, State1} =
+ with_dlx(
+ State#q.dlx,
+ fun (X) -> dead_letter_expired_msgs(ExpirePred, X, State) end,
+ fun () -> {Next, BQS1} = BQ:dropwhile(ExpirePred, BQS),
+ {Next, State#q{backing_queue_state = BQS1}} end),
+ ensure_ttl_timer(case Props of
+ undefined -> undefined;
+ #message_properties{expiry = Exp} -> Exp
+ end, State1).
+
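+%% Apply With(Exchange) when a dead letter exchange is configured and can
+%% still be looked up; otherwise fall back to Without().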
+with_dlx(undefined, _With, Without) -> Without();
+with_dlx(DLX, With, Without) -> case rabbit_exchange:lookup(DLX) of
+ {ok, X} -> With(X);
+ {error, not_found} -> Without()
+ end.
+
+dead_letter_expired_msgs(ExpirePred, X, State = #q{backing_queue = BQ}) ->
+ dead_letter_msgs(fun (DLFun, Acc, BQS1) ->
+ BQ:fetchwhile(ExpirePred, DLFun, Acc, BQS1)
+ end, expired, X, State).
+
+dead_letter_rejected_msgs(AckTags, X, State = #q{backing_queue = BQ}) ->
+ {ok, State1} =
+ dead_letter_msgs(
+ fun (DLFun, Acc, BQS) ->
+ {Acc1, BQS1} = BQ:ackfold(DLFun, Acc, BQS, AckTags),
+ {ok, Acc1, BQS1}
+ end, rejected, X, State),
+ State1.
+
+dead_letter_maxlen_msg(X, State = #q{backing_queue = BQ}) ->
+ {ok, State1} =
+ dead_letter_msgs(
+ fun (DLFun, Acc, BQS) ->
+ {{Msg, _, AckTag}, BQS1} = BQ:fetch(true, BQS),
+ {ok, DLFun(Msg, AckTag, Acc), BQS1}
+ end, maxlen, X, State),
+ State1.
+
+dead_letter_msgs(Fun, Reason, X, State = #q{dlx_routing_key = RK,
+ backing_queue_state = BQS,
+ backing_queue = BQ}) ->
+ QName = qname(State),
+ {Res, Acks1, BQS1} =
+ Fun(fun (Msg, AckTag, Acks) ->
+ rabbit_dead_letter:publish(Msg, Reason, X, RK, QName),
+ [AckTag | Acks]
+ end, [], BQS),
+ {_Guids, BQS2} = BQ:ack(Acks1, BQS1),
+ {Res, State#q{backing_queue_state = BQS2}}.
+
+stop(State) -> stop(noreply, State).
+
+stop(noreply, State) -> {stop, normal, State};
+stop(Reply, State) -> {stop, normal, Reply, State}.
+
+infos(Items, #q{q = Q} = State) ->
+ lists:foldr(fun(totals, Acc) ->
+ [{messages_ready, i(messages_ready, State)},
+ {messages, i(messages, State)},
+ {messages_unacknowledged, i(messages_unacknowledged, State)}] ++ Acc;
+ (type_specific, Acc) ->
+ format(Q) ++ Acc;
+ (Item, Acc) ->
+ [{Item, i(Item, State)} | Acc]
+ end, [], Items).
+
+i(name, #q{q = Q}) -> amqqueue:get_name(Q);
+i(durable, #q{q = Q}) -> amqqueue:is_durable(Q);
+i(auto_delete, #q{q = Q}) -> amqqueue:is_auto_delete(Q);
+i(arguments, #q{q = Q}) -> amqqueue:get_arguments(Q);
+i(pid, _) ->
+ self();
+i(owner_pid, #q{q = Q}) when ?amqqueue_exclusive_owner_is(Q, none) ->
+ '';
+i(owner_pid, #q{q = Q}) ->
+ amqqueue:get_exclusive_owner(Q);
+i(exclusive, #q{q = Q}) ->
+ ExclusiveOwner = amqqueue:get_exclusive_owner(Q),
+ is_pid(ExclusiveOwner);
+i(policy, #q{q = Q}) ->
+ case rabbit_policy:name(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(operator_policy, #q{q = Q}) ->
+ case rabbit_policy:name_op(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(effective_policy_definition, #q{q = Q}) ->
+ case rabbit_policy:effective_definition(Q) of
+ undefined -> [];
+ Def -> Def
+ end;
+i(exclusive_consumer_pid, #q{active_consumer = {ChPid, _ConsumerTag}, single_active_consumer_on = false}) ->
+ ChPid;
+i(exclusive_consumer_pid, _) ->
+ '';
+i(exclusive_consumer_tag, #q{active_consumer = {_ChPid, ConsumerTag}, single_active_consumer_on = false}) ->
+ ConsumerTag;
+i(exclusive_consumer_tag, _) ->
+ '';
+i(single_active_consumer_pid, #q{active_consumer = {ChPid, _Consumer}, single_active_consumer_on = true}) ->
+ ChPid;
+i(single_active_consumer_pid, _) ->
+ '';
+i(single_active_consumer_tag, #q{active_consumer = {_ChPid, Consumer}, single_active_consumer_on = true}) ->
+ rabbit_queue_consumers:consumer_tag(Consumer);
+i(single_active_consumer_tag, _) ->
+ '';
+i(messages_ready, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
+ BQ:len(BQS);
+i(messages_unacknowledged, _) ->
+ rabbit_queue_consumers:unacknowledged_message_count();
+i(messages, State) ->
+ lists:sum([i(Item, State) || Item <- [messages_ready,
+ messages_unacknowledged]]);
+i(consumers, _) ->
+ rabbit_queue_consumers:count();
+i(consumer_utilisation, #q{consumers = Consumers}) ->
+ case rabbit_queue_consumers:count() of
+ 0 -> '';
+ _ -> rabbit_queue_consumers:utilisation(Consumers)
+ end;
+i(memory, _) ->
+ {memory, M} = process_info(self(), memory),
+ M;
+i(slave_pids, #q{q = Q0}) ->
+ Name = amqqueue:get_name(Q0),
+ {ok, Q} = rabbit_amqqueue:lookup(Name),
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> '';
+ true -> amqqueue:get_slave_pids(Q)
+ end;
+i(synchronised_slave_pids, #q{q = Q0}) ->
+ Name = amqqueue:get_name(Q0),
+ {ok, Q} = rabbit_amqqueue:lookup(Name),
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> '';
+ true -> amqqueue:get_sync_slave_pids(Q)
+ end;
+i(recoverable_slaves, #q{q = Q0}) ->
+ Name = amqqueue:get_name(Q0),
+ Durable = amqqueue:is_durable(Q0),
+ {ok, Q} = rabbit_amqqueue:lookup(Name),
+ case Durable andalso rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> '';
+ true -> amqqueue:get_recoverable_slaves(Q)
+ end;
+i(state, #q{status = running}) -> credit_flow:state();
+i(state, #q{status = State}) -> State;
+i(garbage_collection, _State) ->
+ rabbit_misc:get_gc_info(self());
+i(reductions, _State) ->
+ {reductions, Reductions} = erlang:process_info(self(), reductions),
+ Reductions;
+i(user_who_performed_action, #q{q = Q}) ->
+ Opts = amqqueue:get_options(Q),
+ maps:get(user, Opts, ?UNKNOWN_USER);
+i(type, _) -> classic;
+i(Item, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
+ BQ:info(Item, BQS).
+
+emit_stats(State) ->
+ emit_stats(State, []).
+
+emit_stats(State, Extra) ->
+ ExtraKs = [K || {K, _} <- Extra],
+ [{messages_ready, MR}, {messages_unacknowledged, MU}, {messages, M},
+ {reductions, R}, {name, Name} | Infos] = All
+ = [{K, V} || {K, V} <- infos(statistics_keys(), State),
+ not lists:member(K, ExtraKs)],
+ rabbit_core_metrics:queue_stats(Name, Extra ++ Infos),
+ rabbit_core_metrics:queue_stats(Name, MR, MU, M, R),
+ rabbit_event:notify(queue_stats, Extra ++ All).
+
+emit_consumer_created(ChPid, CTag, Exclusive, AckRequired, QName,
+ PrefetchCount, Args, Ref, ActingUser) ->
+ rabbit_event:notify(consumer_created,
+ [{consumer_tag, CTag},
+ {exclusive, Exclusive},
+ {ack_required, AckRequired},
+ {channel, ChPid},
+ {queue, QName},
+ {prefetch_count, PrefetchCount},
+ {arguments, Args},
+ {user_who_performed_action, ActingUser}],
+ Ref).
+
+emit_consumer_deleted(ChPid, ConsumerTag, QName, ActingUser) ->
+ rabbit_core_metrics:consumer_deleted(ChPid, ConsumerTag, QName),
+ rabbit_event:notify(consumer_deleted,
+ [{consumer_tag, ConsumerTag},
+ {channel, ChPid},
+ {queue, QName},
+ {user_who_performed_action, ActingUser}]).
+
+%%----------------------------------------------------------------------------
+
+prioritise_call(Msg, _From, _Len, State) ->
+ case Msg of
+ info -> 9;
+ {info, _Items} -> 9;
+ consumers -> 9;
+ stat -> 7;
+ {basic_consume, _, _, _, _, _, _, _, _, _} -> consumer_bias(State, 0, 2);
+ {basic_cancel, _, _, _} -> consumer_bias(State, 0, 2);
+ _ -> 0
+ end.
+
+prioritise_cast(Msg, _Len, State) ->
+ case Msg of
+ delete_immediately -> 8;
+ {delete_exclusive, _Pid} -> 8;
+ {set_ram_duration_target, _Duration} -> 8;
+ {set_maximum_since_use, _Age} -> 8;
+ {run_backing_queue, _Mod, _Fun} -> 6;
+ {ack, _AckTags, _ChPid} -> 4; %% [1]
+ {resume, _ChPid} -> 3;
+ {notify_sent, _ChPid, _Credit} -> consumer_bias(State, 0, 2);
+ _ -> 0
+ end.
+
+%% [1] It should be safe to always prioritise ack / resume since they
+%% will be rate limited by how fast consumers receive messages -
+%% i.e. by notify_sent. We prioritise ack and resume to discourage
+%% starvation caused by prioritising notify_sent. We don't vary their
+%% priority since acks should stay in order (some parts of the queue
+%% stack are optimised for that) and to make things easier to reason
+%% about. Finally, we prioritise ack over resume since it should
+%% always reduce memory use.
+%% bump_reduce_memory_use is prioritised over publishes, because sending
+%% credit to self is hard to reason about. Consumers can continue while
+%% reduce_memory_use is in progress.
+
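+%% Returns High when the egress/ingress rate ratio is below
+%% ?CONSUMER_BIAS_RATIO (and ingress is non-zero), biasing the queue
+%% process towards consumer-related messages; otherwise returns Low.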
+consumer_bias(#q{backing_queue = BQ, backing_queue_state = BQS}, Low, High) ->
+ case BQ:msg_rates(BQS) of
+ {0.0, _} -> Low;
+ {Ingress, Egress} when Egress / Ingress < ?CONSUMER_BIAS_RATIO -> High;
+ {_, _} -> Low
+ end.
+
+prioritise_info(Msg, _Len, #q{q = Q}) ->
+ DownPid = amqqueue:get_exclusive_owner(Q),
+ case Msg of
+ {'DOWN', _, process, DownPid, _} -> 8;
+ update_ram_duration -> 8;
+ {maybe_expire, _Version} -> 8;
+ {drop_expired, _Version} -> 8;
+ emit_stats -> 7;
+ sync_timeout -> 6;
+ bump_reduce_memory_use -> 1;
+ _ -> 0
+ end.
+
+handle_call({init, Recover}, From, State) ->
+ try
+ init_it(Recover, From, State)
+ catch
+ {coordinator_not_started, Reason} ->
+            %% The GM can shut down before the coordinator has started up
+            %% (lost membership or missing group), thus the start_link of
+            %% the coordinator returns {error, shutdown} as rabbit_amqqueue_process
+            %% is trapping exits. The master captures this return value and
+ %% throws the current exception.
+ {stop, Reason, State}
+ end;
+
+handle_call(info, _From, State) ->
+ reply({ok, infos(info_keys(), State)}, State);
+
+handle_call({info, Items}, _From, State) ->
+ try
+ reply({ok, infos(Items, State)}, State)
+ catch Error -> reply({error, Error}, State)
+ end;
+
+handle_call(consumers, _From, State = #q{consumers = Consumers, single_active_consumer_on = false}) ->
+ reply(rabbit_queue_consumers:all(Consumers), State);
+handle_call(consumers, _From, State = #q{consumers = Consumers, active_consumer = ActiveConsumer}) ->
+ reply(rabbit_queue_consumers:all(Consumers, ActiveConsumer, true), State);
+
+handle_call({notify_down, ChPid}, _From, State) ->
+ %% we want to do this synchronously, so that auto_deleted queues
+ %% are no longer visible by the time we send a response to the
+ %% client. The queue is ultimately deleted in terminate/2; if we
+ %% return stop with a reply, terminate/2 will be called by
+ %% gen_server2 *before* the reply is sent.
+ case handle_ch_down(ChPid, State) of
+ {ok, State1} -> reply(ok, State1);
+ {stop, State1} -> stop(ok, State1#q{status = {terminated_by, auto_delete}})
+ end;
+
+handle_call({basic_get, ChPid, NoAck, LimiterPid}, _From,
+ State = #q{q = Q}) ->
+ QName = amqqueue:get_name(Q),
+ AckRequired = not NoAck,
+ State1 = ensure_expiry_timer(State),
+ case fetch(AckRequired, State1) of
+ {empty, State2} ->
+ reply(empty, State2);
+ {{Message, IsDelivered, AckTag},
+ #q{backing_queue = BQ, backing_queue_state = BQS} = State2} ->
+ case AckRequired of
+ true -> ok = rabbit_queue_consumers:record_ack(
+ ChPid, LimiterPid, AckTag);
+ false -> ok
+ end,
+ Msg = {QName, self(), AckTag, IsDelivered, Message},
+ reply({ok, BQ:len(BQS), Msg}, State2)
+ end;
+
+handle_call({basic_consume, NoAck, ChPid, LimiterPid, LimiterActive,
+ PrefetchCount, ConsumerTag, ExclusiveConsume, Args, OkMsg, ActingUser},
+ _From, State = #q{consumers = Consumers,
+ active_consumer = Holder,
+ single_active_consumer_on = SingleActiveConsumerOn}) ->
+ ConsumerRegistration = case SingleActiveConsumerOn of
+ true ->
+ case ExclusiveConsume of
+ true ->
+ {error, reply({error, exclusive_consume_unavailable}, State)};
+ false ->
+ Consumers1 = rabbit_queue_consumers:add(
+ ChPid, ConsumerTag, NoAck,
+ LimiterPid, LimiterActive,
+ PrefetchCount, Args, is_empty(State),
+ ActingUser, Consumers),
+
+ case Holder of
+ none ->
+ NewConsumer = rabbit_queue_consumers:get(ChPid, ConsumerTag, Consumers1),
+ {state, State#q{consumers = Consumers1,
+ has_had_consumers = true,
+ active_consumer = NewConsumer}};
+ _ ->
+ {state, State#q{consumers = Consumers1,
+ has_had_consumers = true}}
+ end
+ end;
+ false ->
+ case check_exclusive_access(Holder, ExclusiveConsume, State) of
+ in_use -> {error, reply({error, exclusive_consume_unavailable}, State)};
+ ok ->
+ Consumers1 = rabbit_queue_consumers:add(
+ ChPid, ConsumerTag, NoAck,
+ LimiterPid, LimiterActive,
+ PrefetchCount, Args, is_empty(State),
+ ActingUser, Consumers),
+ ExclusiveConsumer =
+ if ExclusiveConsume -> {ChPid, ConsumerTag};
+ true -> Holder
+ end,
+ {state, State#q{consumers = Consumers1,
+ has_had_consumers = true,
+ active_consumer = ExclusiveConsumer}}
+ end
+ end,
+ case ConsumerRegistration of
+ {error, Reply} ->
+ Reply;
+ {state, State1} ->
+ ok = maybe_send_reply(ChPid, OkMsg),
+ QName = qname(State1),
+ AckRequired = not NoAck,
+ TheConsumer = rabbit_queue_consumers:get(ChPid, ConsumerTag, State1#q.consumers),
+ {ConsumerIsActive, ActivityStatus} =
+ case {SingleActiveConsumerOn, State1#q.active_consumer} of
+ {true, TheConsumer} ->
+ {true, single_active};
+ {true, _} ->
+ {false, waiting};
+ {false, _} ->
+ {true, up}
+ end,
+ rabbit_core_metrics:consumer_created(
+ ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName,
+ PrefetchCount, ConsumerIsActive, ActivityStatus, Args),
+ emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume,
+ AckRequired, QName, PrefetchCount,
+ Args, none, ActingUser),
+ notify_decorators(State1),
+ reply(ok, run_message_queue(State1))
+ end;
+
+handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg, ActingUser}, _From,
+ State = #q{consumers = Consumers,
+ active_consumer = Holder,
+ single_active_consumer_on = SingleActiveConsumerOn }) ->
+ ok = maybe_send_reply(ChPid, OkMsg),
+ case rabbit_queue_consumers:remove(ChPid, ConsumerTag, Consumers) of
+ not_found ->
+ reply(ok, State);
+ Consumers1 ->
+ Holder1 = new_single_active_consumer_after_basic_cancel(ChPid, ConsumerTag,
+ Holder, SingleActiveConsumerOn, Consumers1
+ ),
+ State1 = State#q{consumers = Consumers1,
+ active_consumer = Holder1},
+ maybe_notify_consumer_updated(State1, Holder, Holder1),
+ emit_consumer_deleted(ChPid, ConsumerTag, qname(State1), ActingUser),
+ notify_decorators(State1),
+ case should_auto_delete(State1) of
+ false -> reply(ok, ensure_expiry_timer(State1));
+ true ->
+ log_auto_delete(
+ io_lib:format(
+ "because its last consumer with tag '~s' was cancelled",
+ [ConsumerTag]),
+ State),
+ stop(ok, State1)
+ end
+ end;
+
+handle_call(stat, _From, State) ->
+ State1 = #q{backing_queue = BQ, backing_queue_state = BQS} =
+ ensure_expiry_timer(State),
+ reply({ok, BQ:len(BQS), rabbit_queue_consumers:count()}, State1);
+
+handle_call({delete, IfUnused, IfEmpty, ActingUser}, _From,
+ State = #q{backing_queue_state = BQS, backing_queue = BQ}) ->
+ IsEmpty = BQ:is_empty(BQS),
+ IsUnused = is_unused(State),
+ if
+ IfEmpty and not(IsEmpty) -> reply({error, not_empty}, State);
+ IfUnused and not(IsUnused) -> reply({error, in_use}, State);
+ true -> stop({ok, BQ:len(BQS)},
+ State#q{status = {terminated_by, ActingUser}})
+ end;
+
+handle_call(purge, _From, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {Count, BQS1} = BQ:purge(BQS),
+ State1 = State#q{backing_queue_state = BQS1},
+ reply({ok, Count}, maybe_send_drained(Count =:= 0, State1));
+
+handle_call({requeue, AckTags, ChPid}, From, State) ->
+ gen_server2:reply(From, ok),
+ noreply(requeue(AckTags, ChPid, State));
+
+handle_call(sync_mirrors, _From,
+ State = #q{backing_queue = rabbit_mirror_queue_master,
+ backing_queue_state = BQS}) ->
+ S = fun(BQSN) -> State#q{backing_queue_state = BQSN} end,
+ HandleInfo = fun (Status) ->
+ receive {'$gen_call', From, {info, Items}} ->
+ Infos = infos(Items, State#q{status = Status}),
+ gen_server2:reply(From, {ok, Infos})
+ after 0 ->
+ ok
+ end
+ end,
+ EmitStats = fun (Status) ->
+ rabbit_event:if_enabled(
+ State, #q.stats_timer,
+ fun() -> emit_stats(State#q{status = Status}) end)
+ end,
+ case rabbit_mirror_queue_master:sync_mirrors(HandleInfo, EmitStats, BQS) of
+ {ok, BQS1} -> reply(ok, S(BQS1));
+ {stop, Reason, BQS1} -> {stop, Reason, S(BQS1)}
+ end;
+
+handle_call(sync_mirrors, _From, State) ->
+ reply({error, not_mirrored}, State);
+
+%% By definition if we get this message here we do not have to do anything.
+handle_call(cancel_sync_mirrors, _From, State) ->
+ reply({ok, not_syncing}, State).
+
+new_single_active_consumer_after_basic_cancel(ChPid, ConsumerTag, CurrentSingleActiveConsumer,
+ _SingleActiveConsumerIsOn = true, Consumers) ->
+ case rabbit_queue_consumers:is_same(ChPid, ConsumerTag, CurrentSingleActiveConsumer) of
+ true ->
+ case rabbit_queue_consumers:get_consumer(Consumers) of
+ undefined -> none;
+ Consumer -> Consumer
+ end;
+ false ->
+ CurrentSingleActiveConsumer
+ end;
+new_single_active_consumer_after_basic_cancel(ChPid, ConsumerTag, CurrentSingleActiveConsumer,
+ _SingleActiveConsumerIsOn = false, _Consumers) ->
+ case CurrentSingleActiveConsumer of
+ {ChPid, ConsumerTag} -> none;
+ _ -> CurrentSingleActiveConsumer
+ end.
+
+maybe_notify_consumer_updated(#q{single_active_consumer_on = false}, _, _) ->
+ ok;
+maybe_notify_consumer_updated(#q{single_active_consumer_on = true}, SingleActiveConsumer, SingleActiveConsumer) ->
+ % the single active consumer didn't change, nothing to do
+ ok;
+maybe_notify_consumer_updated(#q{single_active_consumer_on = true} = State, _PreviousConsumer, NewConsumer) ->
+ case NewConsumer of
+ {ChPid, Consumer} ->
+ {Tag, Ack, Prefetch, Args} = rabbit_queue_consumers:get_infos(Consumer),
+ rabbit_core_metrics:consumer_updated(
+ ChPid, Tag, false, Ack, qname(State),
+ Prefetch, true, single_active, Args
+ ),
+ ok;
+ _ ->
+ ok
+ end.
+
+handle_cast(init, State) ->
+ try
+ init_it({no_barrier, non_clean_shutdown}, none, State)
+ catch
+ {coordinator_not_started, Reason} ->
+            %% The GM can shut down before the coordinator has started up
+            %% (lost membership or missing group), thus the start_link of
+            %% the coordinator returns {error, shutdown} as rabbit_amqqueue_process
+            %% is trapping exits. The master captures this return value and
+ %% throws the current exception.
+ {stop, Reason, State}
+ end;
+
+handle_cast({run_backing_queue, Mod, Fun},
+ State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
+ noreply(State#q{backing_queue_state = BQ:invoke(Mod, Fun, BQS)});
+
+handle_cast({deliver,
+ Delivery = #delivery{sender = Sender,
+ flow = Flow},
+ SlaveWhenPublished},
+ State = #q{senders = Senders}) ->
+ Senders1 = case Flow of
+                   %% In both credit_flow:ack/1 calls below we ack messages to the
+                   %% channel process that sent us the delivery. See handle_ch_down
+                   %% for more info.
+ flow -> credit_flow:ack(Sender),
+ case SlaveWhenPublished of
+ true -> credit_flow:ack(Sender); %% [0]
+ false -> ok
+ end,
+ pmon:monitor(Sender, Senders);
+ noflow -> Senders
+ end,
+ State1 = State#q{senders = Senders1},
+ noreply(maybe_deliver_or_enqueue(Delivery, SlaveWhenPublished, State1));
+%% [0] The second ack is needed because the channel thought we were a
+%% mirror at the time it published this message, so it used two credits
+%% (see rabbit_queue_type:deliver/2).
+
+handle_cast({ack, AckTags, ChPid}, State) ->
+ noreply(ack(AckTags, ChPid, State));
+
+handle_cast({reject, true, AckTags, ChPid}, State) ->
+ noreply(requeue(AckTags, ChPid, State));
+
+handle_cast({reject, false, AckTags, ChPid}, State) ->
+ noreply(with_dlx(
+ State#q.dlx,
+ fun (X) -> subtract_acks(ChPid, AckTags, State,
+ fun (State1) ->
+ dead_letter_rejected_msgs(
+ AckTags, X, State1)
+ end) end,
+ fun () -> ack(AckTags, ChPid, State) end));
+
+handle_cast({delete_exclusive, ConnPid}, State) ->
+ log_delete_exclusive(ConnPid, State),
+ stop(State);
+
+handle_cast(delete_immediately, State) ->
+ stop(State);
+
+handle_cast({resume, ChPid}, State) ->
+ noreply(possibly_unblock(rabbit_queue_consumers:resume_fun(),
+ ChPid, State));
+
+handle_cast({notify_sent, ChPid, Credit}, State) ->
+ noreply(possibly_unblock(rabbit_queue_consumers:notify_sent_fun(Credit),
+ ChPid, State));
+
+handle_cast({activate_limit, ChPid}, State) ->
+ noreply(possibly_unblock(rabbit_queue_consumers:activate_limit_fun(),
+ ChPid, State));
+
+handle_cast({set_ram_duration_target, Duration},
+ State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
+ BQS1 = BQ:set_ram_duration_target(Duration, BQS),
+ noreply(State#q{backing_queue_state = BQS1});
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ noreply(State);
+
+handle_cast(update_mirroring, State = #q{q = Q,
+ mirroring_policy_version = Version}) ->
+ case needs_update_mirroring(Q, Version) of
+ false ->
+ noreply(State);
+ {Policy, NewVersion} ->
+ State1 = State#q{mirroring_policy_version = NewVersion},
+ noreply(update_mirroring(Policy, State1))
+ end;
+
+handle_cast({credit, ChPid, CTag, Credit, Drain},
+ State = #q{consumers = Consumers,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ q = Q}) ->
+ Len = BQ:len(BQS),
+ rabbit_classic_queue:send_queue_event(ChPid, amqqueue:get_name(Q), {send_credit_reply, Len}),
+ noreply(
+ case rabbit_queue_consumers:credit(Len == 0, Credit, Drain, ChPid, CTag,
+ Consumers) of
+ unchanged -> State;
+ {unblocked, Consumers1} -> State1 = State#q{consumers = Consumers1},
+ run_message_queue(true, State1)
+ end);
+
+% Note: https://www.pivotaltracker.com/story/show/166962656
+% This event is necessary for the stats timer to be initialized with
+% the correct values once the management agent has started
+handle_cast({force_event_refresh, Ref},
+ State = #q{consumers = Consumers,
+ active_consumer = Holder}) ->
+ rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State), Ref),
+ QName = qname(State),
+ AllConsumers = rabbit_queue_consumers:all(Consumers),
+ case Holder of
+ none ->
+ [emit_consumer_created(
+ Ch, CTag, false, AckRequired, QName, Prefetch,
+ Args, Ref, ActingUser) ||
+ {Ch, CTag, AckRequired, Prefetch, _, _, Args, ActingUser}
+ <- AllConsumers];
+ {Ch, CTag} ->
+ [{Ch, CTag, AckRequired, Prefetch, _, _, Args, ActingUser}] = AllConsumers,
+ emit_consumer_created(
+ Ch, CTag, true, AckRequired, QName, Prefetch, Args, Ref, ActingUser)
+ end,
+ noreply(rabbit_event:init_stats_timer(State, #q.stats_timer));
+
+handle_cast(notify_decorators, State) ->
+ notify_decorators(State),
+ noreply(State);
+
+handle_cast(policy_changed, State = #q{q = Q0}) ->
+ Name = amqqueue:get_name(Q0),
+ %% We depend on the #q.q field being up to date at least WRT
+ %% policy (but not mirror pids) in various places, so when it
+ %% changes we go and read it from Mnesia again.
+ %%
+ %% This also has the side effect of waking us up so we emit a
+ %% stats event - so event consumers see the changed policy.
+ {ok, Q} = rabbit_amqqueue:lookup(Name),
+ noreply(process_args_policy(State#q{q = Q}));
+
+handle_cast({sync_start, _, _}, State = #q{q = Q}) ->
+ Name = amqqueue:get_name(Q),
+    %% Only a mirror should receive this; it means we are a duplicated master
+ rabbit_mirror_queue_misc:log_warning(
+ Name, "Stopping after receiving sync_start from another master", []),
+ stop(State).
+
+handle_info({maybe_expire, Vsn}, State = #q{args_policy_version = Vsn}) ->
+ case is_unused(State) of
+ true -> stop(State);
+ false -> noreply(State#q{expiry_timer_ref = undefined})
+ end;
+
+handle_info({maybe_expire, _Vsn}, State) ->
+ noreply(State);
+
+handle_info({drop_expired, Vsn}, State = #q{args_policy_version = Vsn}) ->
+ WasEmpty = is_empty(State),
+ State1 = drop_expired_msgs(State#q{ttl_timer_ref = undefined}),
+ noreply(maybe_send_drained(WasEmpty, State1));
+
+handle_info({drop_expired, _Vsn}, State) ->
+ noreply(State);
+
+handle_info(emit_stats, State) ->
+ emit_stats(State),
+ %% Don't call noreply/1, we don't want to set timers
+ {State1, Timeout} = next_state(rabbit_event:reset_stats_timer(
+ State, #q.stats_timer)),
+ {noreply, State1, Timeout};
+
+handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason},
+ State = #q{q = Q}) when ?amqqueue_exclusive_owner_is(Q, DownPid) ->
+ %% Exclusively owned queues must disappear with their owner. In
+ %% the case of clean shutdown we delete the queue synchronously in
+ %% the reader - although not required by the spec this seems to
+ %% match what people expect (see bug 21824). However we need this
+    %% monitor-and-async-delete in case the connection goes away
+ %% unexpectedly.
+ log_delete_exclusive(DownPid, State),
+ stop(State);
+
+handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) ->
+ case handle_ch_down(DownPid, State) of
+ {ok, State1} -> noreply(State1);
+ {stop, State1} -> stop(State1)
+ end;
+
+handle_info(update_ram_duration, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
+ %% Don't call noreply/1, we don't want to set timers
+ {State1, Timeout} = next_state(State#q{rate_timer_ref = undefined,
+ backing_queue_state = BQS2}),
+ {noreply, State1, Timeout};
+
+handle_info(sync_timeout, State) ->
+ noreply(backing_queue_timeout(State#q{sync_timer_ref = undefined}));
+
+handle_info(timeout, State) ->
+ noreply(backing_queue_timeout(State));
+
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State};
+
+handle_info({bump_credit, Msg}, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ %% The message_store is granting us more credit. This means the
+ %% backing queue (for the rabbit_variable_queue case) might
+ %% continue paging messages to disk if it still needs to. We
+ %% consume credits from the message_store whenever we need to
+ %% persist a message to disk. See:
+ %% rabbit_variable_queue:msg_store_write/4.
+ credit_flow:handle_bump_msg(Msg),
+ noreply(State#q{backing_queue_state = BQ:resume(BQS)});
+handle_info(bump_reduce_memory_use, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS0}) ->
+ BQS1 = BQ:handle_info(bump_reduce_memory_use, BQS0),
+ noreply(State#q{backing_queue_state = BQ:resume(BQS1)});
+
+handle_info(Info, State) ->
+ {stop, {unhandled_info, Info}, State}.
+
+handle_pre_hibernate(State = #q{backing_queue_state = undefined}) ->
+ {hibernate, State};
+handle_pre_hibernate(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
+ BQS3 = BQ:handle_pre_hibernate(BQS2),
+ rabbit_event:if_enabled(
+ State, #q.stats_timer,
+ fun () -> emit_stats(State,
+ [{idle_since,
+ os:system_time(milli_seconds)},
+ {consumer_utilisation, ''}])
+ end),
+ State1 = rabbit_event:stop_stats_timer(State#q{backing_queue_state = BQS3},
+ #q.stats_timer),
+ {hibernate, stop_rate_timer(State1)}.
+
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+format(Q) when ?is_amqqueue(Q) ->
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false ->
+ [{node, node(amqqueue:get_pid(Q))}];
+ true ->
+ Slaves = amqqueue:get_slave_pids(Q),
+ SSlaves = amqqueue:get_sync_slave_pids(Q),
+ [{slave_nodes, [node(S) || S <- Slaves]},
+ {synchronised_slave_nodes, [node(S) || S <- SSlaves]},
+ {node, node(amqqueue:get_pid(Q))}]
+ end.
+
+-spec is_policy_applicable(amqqueue:amqqueue(), any()) -> boolean().
+is_policy_applicable(_Q, _Policy) ->
+ true.
+
+log_delete_exclusive({ConPid, _ConRef}, State) ->
+ log_delete_exclusive(ConPid, State);
+log_delete_exclusive(ConPid, #q{ q = Q }) ->
+ Resource = amqqueue:get_name(Q),
+ #resource{ name = QName, virtual_host = VHost } = Resource,
+ rabbit_log_queue:debug("Deleting exclusive queue '~s' in vhost '~s' " ++
+ "because its declaring connection ~p was closed",
+ [QName, VHost, ConPid]).
+
+log_auto_delete(Reason, #q{ q = Q }) ->
+ Resource = amqqueue:get_name(Q),
+ #resource{ name = QName, virtual_host = VHost } = Resource,
+ rabbit_log_queue:debug("Deleting auto-delete queue '~s' in vhost '~s' " ++
+ Reason,
+ [QName, VHost]).
+
+needs_update_mirroring(Q, Version) ->
+ {ok, UpQ} = rabbit_amqqueue:lookup(amqqueue:get_name(Q)),
+ DBVersion = amqqueue:get_policy_version(UpQ),
+ case DBVersion > Version of
+ true -> {rabbit_policy:get(<<"ha-mode">>, UpQ), DBVersion};
+ false -> false
+ end.
+
+
+update_mirroring(Policy, State = #q{backing_queue = BQ}) ->
+ case update_to(Policy, BQ) of
+ start_mirroring ->
+ start_mirroring(State);
+ stop_mirroring ->
+ stop_mirroring(State);
+ ignore ->
+ State;
+ update_ha_mode ->
+ update_ha_mode(State)
+ end.
+
+update_to(undefined, rabbit_mirror_queue_master) ->
+ stop_mirroring;
+update_to(_, rabbit_mirror_queue_master) ->
+ update_ha_mode;
+update_to(undefined, BQ) when BQ =/= rabbit_mirror_queue_master ->
+ ignore;
+update_to(_, BQ) when BQ =/= rabbit_mirror_queue_master ->
+ start_mirroring.
+
+start_mirroring(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ %% lookup again to get policy for init_with_existing_bq
+ {ok, Q} = rabbit_amqqueue:lookup(qname(State)),
+ true = BQ =/= rabbit_mirror_queue_master, %% assertion
+ BQ1 = rabbit_mirror_queue_master,
+ BQS1 = BQ1:init_with_existing_bq(Q, BQ, BQS),
+ State#q{backing_queue = BQ1,
+ backing_queue_state = BQS1}.
+
+stop_mirroring(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQ = rabbit_mirror_queue_master, %% assertion
+ {BQ1, BQS1} = BQ:stop_mirroring(BQS),
+ State#q{backing_queue = BQ1,
+ backing_queue_state = BQS1}.
+
+update_ha_mode(State) ->
+ {ok, Q} = rabbit_amqqueue:lookup(qname(State)),
+ ok = rabbit_mirror_queue_misc:update_mirrors(Q),
+ State.
+
+confirm_to_sender(Pid, QName, MsgSeqNos) ->
+ rabbit_classic_queue:confirm_to_sender(Pid, QName, MsgSeqNos).
+
+
diff --git a/deps/rabbit/src/rabbit_amqqueue_sup.erl b/deps/rabbit/src/rabbit_amqqueue_sup.erl
new file mode 100644
index 0000000000..a9eaf4087f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_amqqueue_sup.erl
@@ -0,0 +1,35 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqqueue_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(amqqueue:amqqueue(), rabbit_prequeue:start_mode()) ->
+ {'ok', pid(), pid()}.
+
+start_link(Q, StartMode) ->
+ Marker = spawn_link(fun() -> receive stop -> ok end end),
+ ChildSpec = {rabbit_amqqueue,
+ {rabbit_prequeue, start_link, [Q, StartMode, Marker]},
+ intrinsic, ?WORKER_WAIT, worker, [rabbit_amqqueue_process,
+ rabbit_mirror_queue_slave]},
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ {ok, QPid} = supervisor2:start_child(SupPid, ChildSpec),
+ unlink(Marker),
+ Marker ! stop,
+ {ok, SupPid, QPid}.
+
+init([]) -> {ok, {{one_for_one, 5, 10}, []}}.
diff --git a/deps/rabbit/src/rabbit_amqqueue_sup_sup.erl b/deps/rabbit/src/rabbit_amqqueue_sup_sup.erl
new file mode 100644
index 0000000000..732816b79f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_amqqueue_sup_sup.erl
@@ -0,0 +1,84 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqqueue_sup_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_queue_process/3]).
+-export([start_for_vhost/1, stop_for_vhost/1,
+ find_for_vhost/2, find_for_vhost/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+-spec start_queue_process
+ (node(), amqqueue:amqqueue(), 'declare' | 'recovery' | 'slave') ->
+ pid().
+
+start_queue_process(Node, Q, StartMode) ->
+ #resource{virtual_host = VHost} = amqqueue:get_name(Q),
+ {ok, Sup} = find_for_vhost(VHost, Node),
+ {ok, _SupPid, QPid} = supervisor2:start_child(Sup, [Q, StartMode]),
+ QPid.
+
+init([]) ->
+ {ok, {{simple_one_for_one, 10, 10},
+ [{rabbit_amqqueue_sup, {rabbit_amqqueue_sup, start_link, []},
+ temporary, ?SUPERVISOR_WAIT, supervisor, [rabbit_amqqueue_sup]}]}}.
+
+-spec find_for_vhost(rabbit_types:vhost()) -> {ok, pid()} | {error, term()}.
+find_for_vhost(VHost) ->
+ find_for_vhost(VHost, node()).
+
+-spec find_for_vhost(rabbit_types:vhost(), atom()) -> {ok, pid()} | {error, term()}.
+find_for_vhost(VHost, Node) ->
+ {ok, VHostSup} = rabbit_vhost_sup_sup:get_vhost_sup(VHost, Node),
+ case supervisor2:find_child(VHostSup, rabbit_amqqueue_sup_sup) of
+ [QSup] -> {ok, QSup};
+ Result -> {error, {queue_supervisor_not_found, Result}}
+ end.
+
+-spec start_for_vhost(rabbit_types:vhost()) -> {ok, pid()} | {error, term()}.
+start_for_vhost(VHost) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ supervisor2:start_child(
+ VHostSup,
+ {rabbit_amqqueue_sup_sup,
+ {rabbit_amqqueue_sup_sup, start_link, []},
+ transient, infinity, supervisor, [rabbit_amqqueue_sup_sup]});
+ %% we can get here if a vhost is added and removed concurrently
+ %% e.g. some integration tests do it
+ {error, {no_such_vhost, VHost}} ->
+ rabbit_log:error("Failed to start a queue process supervisor for vhost ~s: vhost no longer exists!",
+ [VHost]),
+ {error, {no_such_vhost, VHost}}
+ end.
+
+-spec stop_for_vhost(rabbit_types:vhost()) -> ok.
+stop_for_vhost(VHost) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ ok = supervisor2:terminate_child(VHostSup, rabbit_amqqueue_sup_sup),
+ ok = supervisor2:delete_child(VHostSup, rabbit_amqqueue_sup_sup);
+ %% see start/1
+ {error, {no_such_vhost, VHost}} ->
+ rabbit_log:error("Failed to stop a queue process supervisor for vhost ~s: vhost no longer exists!",
+ [VHost]),
+ ok
+ end.
diff --git a/deps/rabbit/src/rabbit_auth_backend_internal.erl b/deps/rabbit/src/rabbit_auth_backend_internal.erl
new file mode 100644
index 0000000000..cb930a1630
--- /dev/null
+++ b/deps/rabbit/src/rabbit_auth_backend_internal.erl
@@ -0,0 +1,1076 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_internal).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4, check_topic_access/4]).
+
+-export([add_user/3, delete_user/2, lookup_user/1, exists/1,
+ change_password/3, clear_password/2,
+ hash_password/2, change_password_hash/2, change_password_hash/3,
+ set_tags/3, set_permissions/6, clear_permissions/3,
+ set_topic_permissions/6, clear_topic_permissions/3, clear_topic_permissions/4,
+ add_user_sans_validation/3, put_user/2, put_user/3]).
+
+-export([set_user_limits/3, clear_user_limits/3, is_over_connection_limit/1,
+ is_over_channel_limit/1, get_user_limits/0, get_user_limits/1]).
+
+-export([user_info_keys/0, perms_info_keys/0,
+ user_perms_info_keys/0, vhost_perms_info_keys/0,
+ user_vhost_perms_info_keys/0, all_users/0,
+ list_users/0, list_users/2, list_permissions/0,
+ list_user_permissions/1, list_user_permissions/3,
+ list_topic_permissions/0,
+ list_vhost_permissions/1, list_vhost_permissions/3,
+ list_user_vhost_permissions/2,
+ list_user_topic_permissions/1, list_vhost_topic_permissions/1, list_user_vhost_topic_permissions/2]).
+
+-export([state_can_expire/0]).
+
+%% for testing
+-export([hashing_module_for_user/1, expand_topic_permission/2]).
+
+%%----------------------------------------------------------------------------
+
+-type regexp() :: binary().
+
+%%----------------------------------------------------------------------------
+%% Implementation of rabbit_auth_backend
+
+%% Returns a password hashing module for the user record provided. If
+%% there is no information in the record, we consider it to be legacy
+%% (inserted by a version older than 3.6.0) and fall back to MD5, the
+%% now obsolete hashing function.
+hashing_module_for_user(User) ->
+ ModOrUndefined = internal_user:get_hashing_algorithm(User),
+ rabbit_password:hashing_mod(ModOrUndefined).
+
+-define(BLANK_PASSWORD_REJECTION_MESSAGE,
+ "user '~s' attempted to log in with a blank password, which is prohibited by the internal authN backend. "
+ "To use TLS/x509 certificate-based authentication, see the rabbitmq_auth_mechanism_ssl plugin and configure the client to use the EXTERNAL authentication mechanism. "
+ "Alternatively change the password for the user to be non-blank.").
+
+%% For cases when we do not have a set of credentials,
+%% namely when x509 (TLS) certificates are used. This should only be
+%% possible when the EXTERNAL authentication mechanism is used, see
+%% rabbit_auth_mechanism_plain:handle_response/2 and rabbit_reader:auth_phase/2.
+user_login_authentication(Username, []) ->
+ internal_check_user_login(Username, fun(_) -> true end);
+%% For cases when we do have a set of credentials. rabbit_auth_mechanism_plain:handle_response/2
+%% performs initial validation.
+user_login_authentication(Username, AuthProps) ->
+ case lists:keyfind(password, 1, AuthProps) of
+ {password, <<"">>} ->
+ {refused, ?BLANK_PASSWORD_REJECTION_MESSAGE,
+ [Username]};
+ {password, ""} ->
+ {refused, ?BLANK_PASSWORD_REJECTION_MESSAGE,
+ [Username]};
+ {password, Cleartext} ->
+ internal_check_user_login(
+ Username,
+ fun(User) ->
+ case internal_user:get_password_hash(User) of
+ <<Salt:4/binary, Hash/binary>> ->
+ Hash =:= rabbit_password:salted_hash(
+ hashing_module_for_user(User), Salt, Cleartext);
+ _ ->
+ false
+ end
+ end);
+ false -> exit({unknown_auth_props, Username, AuthProps})
+ end.
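+%% Note: the stored password hash above is a 4-byte salt followed by the
+%% salted hash, as matched by <<Salt:4/binary, Hash/binary>>.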
+
+state_can_expire() -> false.
+
+user_login_authorization(Username, _AuthProps) ->
+ case user_login_authentication(Username, []) of
+ {ok, #auth_user{impl = Impl, tags = Tags}} -> {ok, Impl, Tags};
+ Else -> Else
+ end.
+
+internal_check_user_login(Username, Fun) ->
+ Refused = {refused, "user '~s' - invalid credentials", [Username]},
+ case lookup_user(Username) of
+ {ok, User} ->
+ Tags = internal_user:get_tags(User),
+ case Fun(User) of
+ true -> {ok, #auth_user{username = Username,
+ tags = Tags,
+ impl = none}};
+ _ -> Refused
+ end;
+ {error, not_found} ->
+ Refused
+ end.
+
+check_vhost_access(#auth_user{username = Username}, VHostPath, _AuthzData) ->
+ case mnesia:dirty_read({rabbit_user_permission,
+ #user_vhost{username = Username,
+ virtual_host = VHostPath}}) of
+ [] -> false;
+ [_R] -> true
+ end.
+
+check_resource_access(#auth_user{username = Username},
+ #resource{virtual_host = VHostPath, name = Name},
+ Permission,
+ _AuthContext) ->
+ case mnesia:dirty_read({rabbit_user_permission,
+ #user_vhost{username = Username,
+ virtual_host = VHostPath}}) of
+ [] ->
+ false;
+ [#user_permission{permission = P}] ->
+ PermRegexp = case element(permission_index(Permission), P) of
+ %% <<"^$">> breaks Emacs' erlang mode
+ <<"">> -> <<$^, $$>>;
+ RE -> RE
+ end,
+ case re:run(Name, PermRegexp, [{capture, none}]) of
+ match -> true;
+ nomatch -> false
+ end
+ end.
+
+check_topic_access(#auth_user{username = Username},
+ #resource{virtual_host = VHostPath, name = Name, kind = topic},
+ Permission,
+ Context) ->
+ case mnesia:dirty_read({rabbit_topic_permission,
+ #topic_permission_key{user_vhost = #user_vhost{username = Username,
+ virtual_host = VHostPath},
+ exchange = Name
+ }}) of
+ [] ->
+ true;
+ [#topic_permission{permission = P}] ->
+ PermRegexp = case element(permission_index(Permission), P) of
+ %% <<"^$">> breaks Emacs' erlang mode
+ <<"">> -> <<$^, $$>>;
+ RE -> RE
+ end,
+ PermRegexpExpanded = expand_topic_permission(
+ PermRegexp,
+ maps:get(variable_map, Context, undefined)
+ ),
+ case re:run(maps:get(routing_key, Context), PermRegexpExpanded, [{capture, none}]) of
+ match -> true;
+ nomatch -> false
+ end
+ end.
+
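+%% For example (illustrative values): expanding <<"^{username}-.*">> with
+%% #{<<"username">> => <<"guest">>} yields <<"^guest-.*">>.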
+expand_topic_permission(Permission, ToExpand) when is_map(ToExpand) ->
+ Opening = <<"{">>,
+ Closing = <<"}">>,
+ ReplaceFun = fun(K, V, Acc) ->
+ Placeholder = <<Opening/binary, K/binary, Closing/binary>>,
+ binary:replace(Acc, Placeholder, V, [global])
+ end,
+ maps:fold(ReplaceFun, Permission, ToExpand);
+expand_topic_permission(Permission, _ToExpand) ->
+ Permission.
+
+permission_index(configure) -> #permission.configure;
+permission_index(write) -> #permission.write;
+permission_index(read) -> #permission.read.
+
+%%----------------------------------------------------------------------------
+%% Manipulation of the user database
+
+validate_credentials(Username, Password) ->
+ rabbit_credential_validation:validate(Username, Password).
+
+validate_and_alternate_credentials(Username, Password, ActingUser, Fun) ->
+ case validate_credentials(Username, Password) of
+ ok ->
+ Fun(Username, Password, ActingUser);
+ {error, Err} ->
+ rabbit_log:error("Credential validation for '~s' failed!~n", [Username]),
+ {error, Err}
+ end.
+
+-spec add_user(rabbit_types:username(), rabbit_types:password(),
+ rabbit_types:username()) -> 'ok' | {'error', string()}.
+
+add_user(Username, Password, ActingUser) ->
+ validate_and_alternate_credentials(Username, Password, ActingUser,
+ fun add_user_sans_validation/3).
+
+add_user_sans_validation(Username, Password, ActingUser) ->
+ rabbit_log:debug("Asked to create a new user '~s', password length in bytes: ~p", [Username, bit_size(Password)]),
+ %% hash_password will pick the hashing function configured for us
+ %% but we also need to store a hint as part of the record, so we
+ %% retrieve it here one more time
+ HashingMod = rabbit_password:hashing_mod(),
+ PasswordHash = hash_password(HashingMod, Password),
+ User = internal_user:create_user(Username, PasswordHash, HashingMod),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:wread({rabbit_user, Username}) of
+ [] ->
+ ok = mnesia:write(rabbit_user, User, write);
+ _ ->
+ mnesia:abort({user_already_exists, Username})
+ end
+ end),
+ rabbit_log:info("Created user '~s'", [Username]),
+ rabbit_event:notify(user_created, [{name, Username},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {user_already_exists, _}} = Error ->
+ rabbit_log:warning("Failed to add user '~s': the user already exists", [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to add user '~s': ~p", [Username, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to add user '~s': ~p", [Username, Error]),
+ exit(Error)
+ end.
+
+-spec delete_user(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
+
+delete_user(Username, ActingUser) ->
+ rabbit_log:debug("Asked to delete user '~s'", [Username]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_misc:with_user(
+ Username,
+ fun () ->
+ ok = mnesia:delete({rabbit_user, Username}),
+ [ok = mnesia:delete_object(
+ rabbit_user_permission, R, write) ||
+ R <- mnesia:match_object(
+ rabbit_user_permission,
+ #user_permission{user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = '_'},
+ permission = '_'},
+ write)],
+ UserTopicPermissionsQuery = match_user_vhost_topic_permission(Username, '_'),
+ UserTopicPermissions = UserTopicPermissionsQuery(),
+ [ok = mnesia:delete_object(rabbit_topic_permission, R, write) || R <- UserTopicPermissions],
+ ok
+ end)),
+ rabbit_log:info("Deleted user '~s'", [Username]),
+ rabbit_event:notify(user_deleted,
+ [{name, Username},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to delete user '~s': the user does not exist", [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to delete user '~s': ~p", [Username, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to delete user '~s': ~p", [Username, Error]),
+ exit(Error)
+ end.
+
+-spec lookup_user
+ (rabbit_types:username()) ->
+ rabbit_types:ok(internal_user:internal_user()) |
+ rabbit_types:error('not_found').
+
+lookup_user(Username) ->
+ rabbit_misc:dirty_read({rabbit_user, Username}).
+
+-spec exists(rabbit_types:username()) -> boolean().
+
+exists(Username) ->
+ case lookup_user(Username) of
+ {error, not_found} -> false;
+ _ -> true
+ end.
+
+-spec change_password
+ (rabbit_types:username(), rabbit_types:password(), rabbit_types:username()) -> 'ok'.
+
+change_password(Username, Password, ActingUser) ->
+ validate_and_alternate_credentials(Username, Password, ActingUser,
+ fun change_password_sans_validation/3).
+
+change_password_sans_validation(Username, Password, ActingUser) ->
+ try
+ rabbit_log:debug("Asked to change password of user '~s', new password length in bytes: ~p", [Username, bit_size(Password)]),
+ HashingAlgorithm = rabbit_password:hashing_mod(),
+ R = change_password_hash(Username,
+ hash_password(rabbit_password:hashing_mod(),
+ Password),
+ HashingAlgorithm),
+ rabbit_log:info("Successfully changed password for user '~s'", [Username]),
+ rabbit_event:notify(user_password_changed,
+ [{name, Username},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to change password for user '~s': the user does not exist", [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to change password for user '~s': ~p", [Username, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to change password for user '~s': ~p", [Username, Error]),
+ exit(Error)
+ end.
+
+-spec clear_password(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
+
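+%% Clearing a password stores an empty hash; since an empty hash can never
+%% match a submitted password, password-based logins for the user are
+%% effectively disabled until a new password is set.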
+clear_password(Username, ActingUser) ->
+ rabbit_log:info("Clearing password for '~s'~n", [Username]),
+ R = change_password_hash(Username, <<"">>),
+ rabbit_event:notify(user_password_cleared,
+ [{name, Username},
+ {user_who_performed_action, ActingUser}]),
+ R.
+
+-spec hash_password
+ (module(), rabbit_types:password()) -> rabbit_types:password_hash().
+
+hash_password(HashingMod, Cleartext) ->
+ rabbit_password:hash(HashingMod, Cleartext).
+
+-spec change_password_hash
+ (rabbit_types:username(), rabbit_types:password_hash()) -> 'ok'.
+
+change_password_hash(Username, PasswordHash) ->
+ change_password_hash(Username, PasswordHash, rabbit_password:hashing_mod()).
+
+
+change_password_hash(Username, PasswordHash, HashingAlgorithm) ->
+ update_user(Username, fun(User) ->
+ internal_user:set_password_hash(User,
+ PasswordHash, HashingAlgorithm)
+ end).
+
+-spec set_tags(rabbit_types:username(), [atom()], rabbit_types:username()) -> 'ok'.
+
+set_tags(Username, Tags, ActingUser) ->
+ ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
+ rabbit_log:debug("Asked to set user tags for user '~s' to ~p", [Username, ConvertedTags]),
+ try
+ R = update_user(Username, fun(User) ->
+ internal_user:set_tags(User, ConvertedTags)
+ end),
+ rabbit_log:info("Successfully set user tags for user '~s' to ~p", [Username, ConvertedTags]),
+ rabbit_event:notify(user_tags_set, [{name, Username}, {tags, ConvertedTags},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to set tags for user '~s': the user does not exist", [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to set tags for user '~s': ~p", [Username, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to set tags for user '~s': ~p", [Username, Error]),
+ exit(Error)
+ end.
+
+-spec set_permissions
+ (rabbit_types:username(), rabbit_types:vhost(), regexp(), regexp(),
+ regexp(), rabbit_types:username()) ->
+ 'ok'.
+
+set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, ActingUser) ->
+ rabbit_log:debug("Asked to set permissions for "
+ "'~s' in virtual host '~s' to '~s', '~s', '~s'",
+ [Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]),
+ lists:map(
+ fun (RegexpBin) ->
+ Regexp = binary_to_list(RegexpBin),
+ case re:compile(Regexp) of
+ {ok, _} -> ok;
+ {error, Reason} ->
+ rabbit_log:warning("Failed to set permissions for '~s' in virtual host '~s': "
+ "regular expression '~s' is invalid",
+ [Username, VirtualHost, RegexpBin]),
+ throw({error, {invalid_regexp, Regexp, Reason}})
+ end
+ end, [ConfigurePerm, WritePerm, ReadPerm]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with_user_and_vhost(
+ Username, VirtualHost,
+ fun () -> ok = mnesia:write(
+ rabbit_user_permission,
+ #user_permission{user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VirtualHost},
+ permission = #permission{
+ configure = ConfigurePerm,
+ write = WritePerm,
+ read = ReadPerm}},
+ write)
+ end)),
+ rabbit_log:info("Successfully set permissions for "
+ "'~s' in virtual host '~s' to '~s', '~s', '~s'",
+ [Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]),
+ rabbit_event:notify(permission_created, [{user, Username},
+ {vhost, VirtualHost},
+ {configure, ConfigurePerm},
+ {write, WritePerm},
+ {read, ReadPerm},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_vhost, _}} = Error ->
+ rabbit_log:warning("Failed to set permissions for '~s': virtual host '~s' does not exist",
+ [Username, VirtualHost]),
+ throw(Error);
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to set permissions for '~s': the user does not exist",
+ [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to set permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to set permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ exit(Error)
+ end.
+
+-spec clear_permissions
+ (rabbit_types:username(), rabbit_types:vhost(), rabbit_types:username()) -> 'ok'.
+
+clear_permissions(Username, VirtualHost, ActingUser) ->
+ rabbit_log:debug("Asked to clear permissions for '~s' in virtual host '~s'",
+ [Username, VirtualHost]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with_user_and_vhost(
+ Username, VirtualHost,
+ fun () ->
+ ok = mnesia:delete({rabbit_user_permission,
+ #user_vhost{username = Username,
+ virtual_host = VirtualHost}})
+ end)),
+ rabbit_log:info("Successfully cleared permissions for '~s' in virtual host '~s'",
+ [Username, VirtualHost]),
+ rabbit_event:notify(permission_deleted, [{user, Username},
+ {vhost, VirtualHost},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_vhost, _}} = Error ->
+ rabbit_log:warning("Failed to clear permissions for '~s': virtual host '~s' does not exist",
+ [Username, VirtualHost]),
+ throw(Error);
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to clear permissions for '~s': the user does not exist",
+ [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to clear permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to clear permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ exit(Error)
+ end.
+
+
+update_user(Username, Fun) ->
+ rabbit_misc:execute_mnesia_transaction(
+ rabbit_misc:with_user(
+ Username,
+ fun () ->
+ {ok, User} = lookup_user(Username),
+ ok = mnesia:write(rabbit_user, Fun(User), write)
+ end)).
+
+set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, ActingUser) ->
+ rabbit_log:debug("Asked to set topic permissions on exchange '~s' for "
+ "user '~s' in virtual host '~s' to '~s', '~s'",
+ [Exchange, Username, VirtualHost, WritePerm, ReadPerm]),
+ WritePermRegex = rabbit_data_coercion:to_binary(WritePerm),
+ ReadPermRegex = rabbit_data_coercion:to_binary(ReadPerm),
+ lists:map(
+ fun (RegexpBin) ->
+ case re:compile(RegexpBin) of
+ {ok, _} -> ok;
+ {error, Reason} ->
+ rabbit_log:warning("Failed to set topic permissions on exchange '~s' for "
+ "'~s' in virtual host '~s': regular expression '~s' is invalid",
+ [Exchange, Username, VirtualHost, RegexpBin]),
+ throw({error, {invalid_regexp, RegexpBin, Reason}})
+ end
+ end, [WritePerm, ReadPerm]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with_user_and_vhost(
+ Username, VirtualHost,
+ fun () -> ok = mnesia:write(
+ rabbit_topic_permission,
+ #topic_permission{
+ topic_permission_key = #topic_permission_key{
+ user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VirtualHost},
+ exchange = Exchange
+ },
+ permission = #permission{
+ write = WritePermRegex,
+ read = ReadPermRegex
+ }
+ },
+ write)
+ end)),
+ rabbit_log:info("Successfully set topic permissions on exchange '~s' for "
+ "'~s' in virtual host '~s' to '~s', '~s'",
+ [Exchange, Username, VirtualHost, WritePerm, ReadPerm]),
+ rabbit_event:notify(topic_permission_created, [
+ {user, Username},
+ {vhost, VirtualHost},
+ {exchange, Exchange},
+ {write, WritePermRegex},
+ {read, ReadPermRegex},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_vhost, _}} = Error ->
+ rabbit_log:warning("Failed to set topic permissions on exchange '~s' for '~s': virtual host '~s' does not exist.",
+ [Exchange, Username, VirtualHost]),
+ throw(Error);
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to set topic permissions on exchange '~s' for '~s': the user does not exist.",
+ [Exchange, Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to set topic permissions on exchange '~s' for '~s' in virtual host '~s': ~p.",
+ [Exchange, Username, VirtualHost, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to set topic permissions on exchange '~s' for '~s' in virtual host '~s': ~p.",
+ [Exchange, Username, VirtualHost, Error]),
+ exit(Error)
+ end.
+
+clear_topic_permissions(Username, VirtualHost, ActingUser) ->
+ rabbit_log:debug("Asked to clear topic permissions for '~s' in virtual host '~s'",
+ [Username, VirtualHost]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with_user_and_vhost(
+ Username, VirtualHost,
+ fun () ->
+ ListFunction = match_user_vhost_topic_permission(Username, VirtualHost),
+ List = ListFunction(),
+ lists:foreach(fun(X) ->
+ ok = mnesia:delete_object(rabbit_topic_permission, X, write)
+ end, List)
+ end)),
+ rabbit_log:info("Successfully cleared topic permissions for '~s' in virtual host '~s'",
+ [Username, VirtualHost]),
+ rabbit_event:notify(topic_permission_deleted, [{user, Username},
+ {vhost, VirtualHost},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_vhost, _}} = Error ->
+ rabbit_log:warning("Failed to clear topic permissions for '~s': virtual host '~s' does not exist",
+ [Username, VirtualHost]),
+ throw(Error);
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to clear topic permissions for '~s': the user does not exist",
+ [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to clear topic permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to clear topic permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ exit(Error)
+ end.
+
+clear_topic_permissions(Username, VirtualHost, Exchange, ActingUser) ->
+ rabbit_log:debug("Asked to clear topic permissions on exchange '~s' for '~s' in virtual host '~s'",
+ [Exchange, Username, VirtualHost]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with_user_and_vhost(
+ Username, VirtualHost,
+ fun () ->
+ ok = mnesia:delete(rabbit_topic_permission,
+ #topic_permission_key{
+ user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VirtualHost},
+ exchange = Exchange
+ }, write)
+ end)),
+ rabbit_log:info("Successfully cleared topic permissions on exchange '~s' for '~s' in virtual host '~s'",
+ [Exchange, Username, VirtualHost]),
+ rabbit_event:notify(permission_deleted, [{user, Username},
+ {vhost, VirtualHost},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_vhost, _}} = Error ->
+ rabbit_log:warning("Failed to clear topic permissions on exchange '~s' for '~s': virtual host '~s' does not exist",
+ [Exchange, Username, VirtualHost]),
+ throw(Error);
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to clear topic permissions on exchange '~s' for '~s': the user does not exist",
+ [Exchange, Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to clear topic permissions on exchange '~s' for '~s' in virtual host '~s': ~p",
+ [Exchange, Username, VirtualHost, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to clear topic permissions on exchange '~s' for '~s' in virtual host '~s': ~p",
+ [Exchange, Username, VirtualHost, Error]),
+ exit(Error)
+ end.
+
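+%% Imports a user definition map (as used by definitions import). A minimal
+%% illustrative example, with hypothetical values:
+%% put_user(#{name => <<"bunny">>,
+%%            password => <<"t0ps3kr3t">>,
+%%            tags => <<"administrator,monitoring">>}, <<"acting-user">>).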
+put_user(User, ActingUser) -> put_user(User, undefined, ActingUser).
+
+put_user(User, Version, ActingUser) ->
+ Username = maps:get(name, User),
+ HasPassword = maps:is_key(password, User),
+ HasPasswordHash = maps:is_key(password_hash, User),
+ Password = maps:get(password, User, undefined),
+ PasswordHash = maps:get(password_hash, User, undefined),
+
+ Tags = case {maps:get(tags, User, undefined), maps:get(administrator, User, undefined)} of
+ {undefined, undefined} ->
+ throw({error, tags_not_present});
+ {undefined, AdminS} ->
+ case rabbit_misc:parse_bool(AdminS) of
+ true -> [administrator];
+ false -> []
+ end;
+ {TagsS, _} ->
+ [list_to_atom(string:strip(T)) ||
+ T <- string:tokens(binary_to_list(TagsS), ",")]
+ end,
+
+ %% pre-configured, only applies to newly created users
+ Permissions = maps:get(permissions, User, undefined),
+
+ PassedCredentialValidation =
+ case {HasPassword, HasPasswordHash} of
+ {true, false} ->
+ rabbit_credential_validation:validate(Username, Password) =:= ok;
+ {false, true} -> true;
+ _ ->
+ rabbit_credential_validation:validate(Username, Password) =:= ok
+ end,
+
+ case exists(Username) of
+ true ->
+ case {HasPassword, HasPasswordHash} of
+ {true, false} ->
+ update_user_password(PassedCredentialValidation, Username, Password, Tags, ActingUser);
+ {false, true} ->
+ update_user_password_hash(Username, PasswordHash, Tags, User, Version, ActingUser);
+ {true, true} ->
+ throw({error, both_password_and_password_hash_are_provided});
+ %% clear password, update tags if needed
+ _ ->
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser),
+ rabbit_auth_backend_internal:clear_password(Username, ActingUser)
+ end;
+ false ->
+ case {HasPassword, HasPasswordHash} of
+ {true, false} ->
+ create_user_with_password(PassedCredentialValidation, Username, Password, Tags, Permissions, ActingUser);
+ {false, true} ->
+ create_user_with_password_hash(Username, PasswordHash, Tags, User, Version, Permissions, ActingUser);
+ {true, true} ->
+ throw({error, both_password_and_password_hash_are_provided});
+ {false, false} ->
+ %% this user won't be able to sign in using
+ %% a username/password pair but can be used for x509 certificate authentication,
+ %% with authn backends such as HTTP or LDAP and so on.
+ create_user_with_password(PassedCredentialValidation, Username, <<"">>, Tags, Permissions, ActingUser)
+ end
+ end.
+
+update_user_password(_PassedCredentialValidation = true, Username, Password, Tags, ActingUser) ->
+ rabbit_auth_backend_internal:change_password(Username, Password, ActingUser),
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser);
+update_user_password(_PassedCredentialValidation = false, _Username, _Password, _Tags, _ActingUser) ->
+ %% we don't log here because
+ %% rabbit_auth_backend_internal will do it
+ throw({error, credential_validation_failed}).
+
+update_user_password_hash(Username, PasswordHash, Tags, User, Version, ActingUser) ->
+ %% when a password hash is provided, credential validation
+ %% is not applied
+ HashingAlgorithm = hashing_algorithm(User, Version),
+
+ Hash = rabbit_misc:b64decode_or_throw(PasswordHash),
+ rabbit_auth_backend_internal:change_password_hash(
+ Username, Hash, HashingAlgorithm),
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser).
+
+create_user_with_password(_PassedCredentialValidation = true, Username, Password, Tags, undefined, ActingUser) ->
+ rabbit_auth_backend_internal:add_user(Username, Password, ActingUser),
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser);
+create_user_with_password(_PassedCredentialValidation = true, Username, Password, Tags, PreconfiguredPermissions, ActingUser) ->
+ rabbit_auth_backend_internal:add_user(Username, Password, ActingUser),
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser),
+ preconfigure_permissions(Username, PreconfiguredPermissions, ActingUser);
+create_user_with_password(_PassedCredentialValidation = false, _Username, _Password, _Tags, _, _) ->
+ %% we don't log here because
+ %% rabbit_auth_backend_internal will do it
+ throw({error, credential_validation_failed}).
+
+create_user_with_password_hash(Username, PasswordHash, Tags, User, Version, PreconfiguredPermissions, ActingUser) ->
+ %% when a password hash is provided, credential validation
+ %% is not applied
+ HashingAlgorithm = hashing_algorithm(User, Version),
+ Hash = rabbit_misc:b64decode_or_throw(PasswordHash),
+
+ %% first we create a user with dummy credentials and no
+ %% validation applied, then we update password hash
+ TmpPassword = rabbit_guid:binary(rabbit_guid:gen_secure(), "tmp"),
+ rabbit_auth_backend_internal:add_user_sans_validation(Username, TmpPassword, ActingUser),
+
+ rabbit_auth_backend_internal:change_password_hash(
+ Username, Hash, HashingAlgorithm),
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser),
+ preconfigure_permissions(Username, PreconfiguredPermissions, ActingUser).
+
+preconfigure_permissions(_Username, undefined, _ActingUser) ->
+ ok;
+preconfigure_permissions(Username, Map, ActingUser) when is_map(Map) ->
+ maps:map(fun(VHost, M) ->
+ rabbit_auth_backend_internal:set_permissions(Username, VHost,
+ maps:get(<<"configure">>, M),
+ maps:get(<<"write">>, M),
+ maps:get(<<"read">>, M),
+ ActingUser)
+ end,
+ Map),
+ ok.
+
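+%% Accepts the limits definition either as a JSON string/binary or as a map,
+%% e.g. (illustrative):
+%% set_user_limits(<<"guest">>, <<"{\"max-connections\": 64}">>, ActingUser)
+%% or
+%% set_user_limits(<<"guest">>, #{<<"max-channels">> => 16}, ActingUser).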
+set_user_limits(Username, Definition, ActingUser) when is_list(Definition); is_binary(Definition) ->
+ case rabbit_feature_flags:is_enabled(user_limits) of
+ true ->
+ case rabbit_json:try_decode(rabbit_data_coercion:to_binary(Definition)) of
+ {ok, Term} ->
+ validate_parameters_and_update_limit(Username, Term, ActingUser);
+ {error, Reason} ->
+ {error_string, rabbit_misc:format(
+ "JSON decoding error. Reason: ~ts", [Reason])}
+ end;
+ false -> {error_string, "cannot set any user limits: the user_limits feature flag is not enabled"}
+ end;
+set_user_limits(Username, Definition, ActingUser) when is_map(Definition) ->
+ case rabbit_feature_flags:is_enabled(user_limits) of
+ true -> validate_parameters_and_update_limit(Username, Definition, ActingUser);
+ false -> {error_string, "cannot set any user limits: the user_limits feature flag is not enabled"}
+ end.
+
+validate_parameters_and_update_limit(Username, Term, ActingUser) ->
+ case flatten_errors(rabbit_parameter_validation:proplist(
+ <<"user-limits">>, user_limit_validation(), Term)) of
+ ok ->
+ update_user(Username, fun(User) ->
+ internal_user:update_limits(add, User, Term)
+ end),
+ notify_limit_set(Username, ActingUser, Term);
+ {errors, [{Reason, Arguments}]} ->
+ {error_string, rabbit_misc:format(Reason, Arguments)}
+ end.
+
+user_limit_validation() ->
+ [{<<"max-connections">>, fun rabbit_parameter_validation:integer/2, optional},
+ {<<"max-channels">>, fun rabbit_parameter_validation:integer/2, optional}].
+
+clear_user_limits(Username, <<"all">>, ActingUser) ->
+ update_user(Username, fun(User) ->
+ internal_user:clear_limits(User)
+ end),
+ notify_limit_clear(Username, ActingUser);
+clear_user_limits(Username, LimitType, ActingUser) ->
+ update_user(Username, fun(User) ->
+ internal_user:update_limits(remove, User, LimitType)
+ end),
+ notify_limit_clear(Username, ActingUser).
+
+flatten_errors(L) ->
+ case [{F, A} || I <- lists:flatten([L]), {error, F, A} <- [I]] of
+ [] -> ok;
+ E -> {errors, E}
+ end.
+
+%%----------------------------------------------------------------------------
+%% Listing
+
+-define(PERMS_INFO_KEYS, [configure, write, read]).
+-define(USER_INFO_KEYS, [user, tags]).
+
+-spec user_info_keys() -> rabbit_types:info_keys().
+
+user_info_keys() -> ?USER_INFO_KEYS.
+
+-spec perms_info_keys() -> rabbit_types:info_keys().
+
+perms_info_keys() -> [user, vhost | ?PERMS_INFO_KEYS].
+
+-spec vhost_perms_info_keys() -> rabbit_types:info_keys().
+
+vhost_perms_info_keys() -> [user | ?PERMS_INFO_KEYS].
+
+-spec user_perms_info_keys() -> rabbit_types:info_keys().
+
+user_perms_info_keys() -> [vhost | ?PERMS_INFO_KEYS].
+
+-spec user_vhost_perms_info_keys() -> rabbit_types:info_keys().
+
+user_vhost_perms_info_keys() -> ?PERMS_INFO_KEYS.
+
+topic_perms_info_keys() -> [user, vhost, exchange, write, read].
+user_topic_perms_info_keys() -> [vhost, exchange, write, read].
+vhost_topic_perms_info_keys() -> [user, exchange, write, read].
+user_vhost_topic_perms_info_keys() -> [exchange, write, read].
+
+all_users() -> mnesia:dirty_match_object(rabbit_user, internal_user:pattern_match_all()).
+
+-spec list_users() -> [rabbit_types:infos()].
+
+list_users() ->
+ [extract_internal_user_params(U) ||
+ U <- all_users()].
+
+-spec list_users(reference(), pid()) -> 'ok'.
+
+list_users(Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref,
+ fun(U) -> extract_internal_user_params(U) end,
+ all_users()).
+
+-spec list_permissions() -> [rabbit_types:infos()].
+
+list_permissions() ->
+ list_permissions(perms_info_keys(), match_user_vhost('_', '_')).
+
+list_permissions(Keys, QueryThunk) ->
+ [extract_user_permission_params(Keys, U) ||
+ U <- rabbit_misc:execute_mnesia_transaction(QueryThunk)].
+
+list_permissions(Keys, QueryThunk, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(U) -> extract_user_permission_params(Keys, U) end,
+ rabbit_misc:execute_mnesia_transaction(QueryThunk)).
+
+filter_props(Keys, Props) -> [T || T = {K, _} <- Props, lists:member(K, Keys)].
+
+-spec list_user_permissions
+ (rabbit_types:username()) -> [rabbit_types:infos()].
+
+list_user_permissions(Username) ->
+ list_permissions(
+ user_perms_info_keys(),
+ rabbit_misc:with_user(Username, match_user_vhost(Username, '_'))).
+
+-spec list_user_permissions
+ (rabbit_types:username(), reference(), pid()) -> 'ok'.
+
+list_user_permissions(Username, Ref, AggregatorPid) ->
+ list_permissions(
+ user_perms_info_keys(),
+ rabbit_misc:with_user(Username, match_user_vhost(Username, '_')),
+ Ref, AggregatorPid).
+
+-spec list_vhost_permissions
+ (rabbit_types:vhost()) -> [rabbit_types:infos()].
+
+list_vhost_permissions(VHostPath) ->
+ list_permissions(
+ vhost_perms_info_keys(),
+ rabbit_vhost:with(VHostPath, match_user_vhost('_', VHostPath))).
+
+-spec list_vhost_permissions
+ (rabbit_types:vhost(), reference(), pid()) -> 'ok'.
+
+list_vhost_permissions(VHostPath, Ref, AggregatorPid) ->
+ list_permissions(
+ vhost_perms_info_keys(),
+ rabbit_vhost:with(VHostPath, match_user_vhost('_', VHostPath)),
+ Ref, AggregatorPid).
+
+-spec list_user_vhost_permissions
+ (rabbit_types:username(), rabbit_types:vhost()) -> [rabbit_types:infos()].
+
+list_user_vhost_permissions(Username, VHostPath) ->
+ list_permissions(
+ user_vhost_perms_info_keys(),
+ rabbit_vhost:with_user_and_vhost(
+ Username, VHostPath, match_user_vhost(Username, VHostPath))).
+
+extract_user_permission_params(Keys, #user_permission{
+ user_vhost =
+ #user_vhost{username = Username,
+ virtual_host = VHostPath},
+ permission = #permission{
+ configure = ConfigurePerm,
+ write = WritePerm,
+ read = ReadPerm}}) ->
+ filter_props(Keys, [{user, Username},
+ {vhost, VHostPath},
+ {configure, ConfigurePerm},
+ {write, WritePerm},
+ {read, ReadPerm}]).
+
+extract_internal_user_params(User) ->
+ [{user, internal_user:get_username(User)},
+ {tags, internal_user:get_tags(User)}].
+
+match_user_vhost(Username, VHostPath) ->
+ fun () -> mnesia:match_object(
+ rabbit_user_permission,
+ #user_permission{user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VHostPath},
+ permission = '_'},
+ read)
+ end.
+
+list_topic_permissions() ->
+ list_topic_permissions(topic_perms_info_keys(), match_user_vhost_topic_permission('_', '_')).
+
+list_user_topic_permissions(Username) ->
+ list_topic_permissions(user_topic_perms_info_keys(),
+ rabbit_misc:with_user(Username, match_user_vhost_topic_permission(Username, '_'))).
+
+list_vhost_topic_permissions(VHost) ->
+ list_topic_permissions(vhost_topic_perms_info_keys(),
+ rabbit_vhost:with(VHost, match_user_vhost_topic_permission('_', VHost))).
+
+list_user_vhost_topic_permissions(Username, VHost) ->
+ list_topic_permissions(user_vhost_topic_perms_info_keys(),
+ rabbit_vhost:with_user_and_vhost(Username, VHost, match_user_vhost_topic_permission(Username, VHost))).
+
+list_topic_permissions(Keys, QueryThunk) ->
+ [extract_topic_permission_params(Keys, U) ||
+ U <- rabbit_misc:execute_mnesia_transaction(QueryThunk)].
+
+match_user_vhost_topic_permission(Username, VHostPath) ->
+ match_user_vhost_topic_permission(Username, VHostPath, '_').
+
+match_user_vhost_topic_permission(Username, VHostPath, Exchange) ->
+ fun () -> mnesia:match_object(
+ rabbit_topic_permission,
+ #topic_permission{topic_permission_key = #topic_permission_key{
+ user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VHostPath},
+ exchange = Exchange},
+ permission = '_'},
+ read)
+ end.
+
+extract_topic_permission_params(Keys, #topic_permission{
+ topic_permission_key = #topic_permission_key{
+ user_vhost = #user_vhost{username = Username,
+ virtual_host = VHostPath},
+ exchange = Exchange},
+ permission = #permission{
+ write = WritePerm,
+ read = ReadPerm}}) ->
+ filter_props(Keys, [{user, Username},
+ {vhost, VHostPath},
+ {exchange, Exchange},
+ {write, WritePerm},
+ {read, ReadPerm}]).
+
+hashing_algorithm(User, Version) ->
+ case maps:get(hashing_algorithm, User, undefined) of
+ undefined ->
+ case Version of
+ %% 3.6.1 and later versions are supposed to have
+ %% the algorithm exported and thus not need a default
+ <<"3.6.0">> -> rabbit_password_hashing_sha256;
+ <<"3.5.", _/binary>> -> rabbit_password_hashing_md5;
+ <<"3.4.", _/binary>> -> rabbit_password_hashing_md5;
+ <<"3.3.", _/binary>> -> rabbit_password_hashing_md5;
+ <<"3.2.", _/binary>> -> rabbit_password_hashing_md5;
+ <<"3.1.", _/binary>> -> rabbit_password_hashing_md5;
+ <<"3.0.", _/binary>> -> rabbit_password_hashing_md5;
+ _ -> rabbit_password:hashing_mod()
+ end;
+ Alg -> rabbit_data_coercion:to_atom(Alg, utf8)
+ end.
+
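+%% Per-user limit checks. A limit of 0 always reports {true, 0}; a missing
+%% or negative limit means "unlimited" and reports false. Illustrative
+%% example: with "max-connections" set to 10 and 10 tracked connections,
+%% is_over_connection_limit/1 returns {true, 10}.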
+is_over_connection_limit(Username) ->
+ Fun = fun() ->
+ rabbit_connection_tracking:count_tracked_items_in({user, Username})
+ end,
+ is_over_limit(Username, <<"max-connections">>, Fun).
+
+is_over_channel_limit(Username) ->
+ Fun = fun() ->
+ rabbit_channel_tracking:count_tracked_items_in({user, Username})
+ end,
+ is_over_limit(Username, <<"max-channels">>, Fun).
+
+is_over_limit(Username, LimitType, Fun) ->
+ case get_user_limit(Username, LimitType) of
+ undefined -> false;
+ {ok, 0} -> {true, 0};
+ {ok, Limit} ->
+ case Fun() >= Limit of
+ false -> false;
+ true -> {true, Limit}
+ end
+ end.
+
+get_user_limit(Username, LimitType) ->
+ case lookup_user(Username) of
+ {ok, User} ->
+ case rabbit_misc:pget(LimitType, internal_user:get_limits(User)) of
+ undefined -> undefined;
+ N when N < 0 -> undefined;
+ N when N >= 0 -> {ok, N}
+ end;
+ _ ->
+ undefined
+ end.
+
+get_user_limits() ->
+ [{internal_user:get_username(U), internal_user:get_limits(U)} ||
+ U <- all_users(),
+ internal_user:get_limits(U) =/= #{}].
+
+get_user_limits(Username) ->
+ case lookup_user(Username) of
+ {ok, User} -> internal_user:get_limits(User);
+ _ -> undefined
+ end.
+
+notify_limit_set(Username, ActingUser, Term) ->
+ rabbit_event:notify(user_limits_set,
+ [{name, <<"limits">>}, {user_who_performed_action, ActingUser},
+ {username, Username} | maps:to_list(Term)]).
+
+notify_limit_clear(Username, ActingUser) ->
+ rabbit_event:notify(user_limits_cleared,
+ [{name, <<"limits">>}, {user_who_performed_action, ActingUser},
+ {username, Username}]).
diff --git a/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl b/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl
new file mode 100644
index 0000000000..c81a337153
--- /dev/null
+++ b/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_amqplain).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "auth mechanism amqplain"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"AMQPLAIN">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%% AMQPLAIN, as used by the Qpid Python test suite. The 0-8 spec actually
+%% defines this as PLAIN, but in 0-9 that definition is gone, instead
+%% referring generically to "SASL security mechanism", i.e. the above.
+
+description() ->
+ [{description, <<"QPid AMQPLAIN mechanism">>}].
+
+should_offer(_Sock) ->
+ true.
+
+init(_Sock) ->
+ [].
+
+-define(IS_STRING_TYPE(Type), Type =:= longstr orelse Type =:= shortstr).
+
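+%% The client response is an AMQP field table; once decoded by
+%% rabbit_binary_parser:parse_table/1 it looks like (illustrative):
+%% [{<<"LOGIN">>, longstr, <<"guest">>}, {<<"PASSWORD">>, longstr, <<"guest">>}].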
+handle_response(Response, _State) ->
+ LoginTable = rabbit_binary_parser:parse_table(Response),
+ case {lists:keysearch(<<"LOGIN">>, 1, LoginTable),
+ lists:keysearch(<<"PASSWORD">>, 1, LoginTable)} of
+ {{value, {_, UserType, User}},
+ {value, {_, PassType, Pass}}} when ?IS_STRING_TYPE(UserType);
+ ?IS_STRING_TYPE(PassType) ->
+ rabbit_access_control:check_user_pass_login(User, Pass);
+ {{value, {_, _UserType, _User}},
+ {value, {_, _PassType, _Pass}}} ->
+ {protocol_error,
+ "AMQPLAIN auth info ~w uses unsupported type for LOGIN or PASSWORD field",
+ [LoginTable]};
+ _ ->
+ {protocol_error,
+ "AMQPLAIN auth info ~w is missing LOGIN or PASSWORD field",
+ [LoginTable]}
+ end.
diff --git a/deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl b/deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl
new file mode 100644
index 0000000000..15439c461f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl
@@ -0,0 +1,48 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_cr_demo).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "auth mechanism cr-demo"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"RABBIT-CR-DEMO">>,
+ ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+-record(state, {username = undefined}).
+
+%% Provides equivalent security to PLAIN but demos use of Connection.Secure(Ok)
+%% START-OK: Username
+%% SECURE: "Please tell me your password"
+%% SECURE-OK: "My password is ~s", [Password]
+
+description() ->
+ [{description, <<"RabbitMQ Demo challenge-response authentication "
+ "mechanism">>}].
+
+should_offer(_Sock) ->
+ true.
+
+init(_Sock) ->
+ #state{}.
+
+handle_response(Response, State = #state{username = undefined}) ->
+ {challenge, <<"Please tell me your password">>,
+ State#state{username = Response}};
+
+handle_response(<<"My password is ", Password/binary>>,
+ #state{username = Username}) ->
+ rabbit_access_control:check_user_pass_login(Username, Password);
+handle_response(Response, _State) ->
+ {protocol_error, "Invalid response '~s'", [Response]}.
diff --git a/deps/rabbit/src/rabbit_auth_mechanism_plain.erl b/deps/rabbit/src/rabbit_auth_mechanism_plain.erl
new file mode 100644
index 0000000000..d704c72400
--- /dev/null
+++ b/deps/rabbit/src/rabbit_auth_mechanism_plain.erl
@@ -0,0 +1,60 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_plain).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "auth mechanism plain"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"PLAIN">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%% SASL PLAIN, as used by the Qpid Java client and our clients. Also,
+%% apparently, by OpenAMQ.
+
+description() ->
+ [{description, <<"SASL PLAIN authentication mechanism">>}].
+
+should_offer(_Sock) ->
+ true.
+
+init(_Sock) ->
+ [].
+
+handle_response(Response, _State) ->
+ case extract_user_pass(Response) of
+ {ok, User, Pass} ->
+ rabbit_access_control:check_user_pass_login(User, Pass);
+ error ->
+ {protocol_error, "response ~p invalid", [Response]}
+ end.
+
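+%% A SASL PLAIN response carries authzid, username and password separated by
+%% NUL bytes; this parser expects an empty authzid, so for example
+%% (illustrative) extract_user_pass(<<0, "guest", 0, "guest">>) returns
+%% {ok, <<"guest">>, <<"guest">>}.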
+extract_user_pass(Response) ->
+ case extract_elem(Response) of
+ {ok, User, Response1} -> case extract_elem(Response1) of
+ {ok, Pass, <<>>} -> {ok, User, Pass};
+ _ -> error
+ end;
+ error -> error
+ end.
+
+extract_elem(<<0:8, Rest/binary>>) ->
+ Count = next_null_pos(Rest, 0),
+ <<Elem:Count/binary, Rest1/binary>> = Rest,
+ {ok, Elem, Rest1};
+extract_elem(_) ->
+ error.
+
+next_null_pos(<<>>, Count) -> Count;
+next_null_pos(<<0:8, _Rest/binary>>, Count) -> Count;
+next_null_pos(<<_:8, Rest/binary>>, Count) -> next_null_pos(Rest, Count + 1).
diff --git a/deps/rabbit/src/rabbit_autoheal.erl b/deps/rabbit/src/rabbit_autoheal.erl
new file mode 100644
index 0000000000..6380d71895
--- /dev/null
+++ b/deps/rabbit/src/rabbit_autoheal.erl
@@ -0,0 +1,456 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_autoheal).
+
+-export([init/0, enabled/0, maybe_start/1, rabbit_down/2, node_down/2,
+ handle_msg/3, process_down/2]).
+
+%% The named process we are running in.
+-define(SERVER, rabbit_node_monitor).
+
+-define(MNESIA_STOPPED_PING_INTERNAL, 200).
+
+-define(AUTOHEAL_STATE_AFTER_RESTART, rabbit_autoheal_state_after_restart).
+
+%%----------------------------------------------------------------------------
+
+%% In order to autoheal we want to:
+%%
+%% * Find the winning partition
+%% * Stop all nodes in other partitions
+%% * Wait for them all to be stopped
+%% * Start them again
+%%
+%% To keep things simple, we assume all nodes are up. We don't start
+%% unless all nodes are up, and if a node goes down we abandon the
+%% whole process. To further keep things simple we also defer the
+%% decision as to the winning node to the "leader" - arbitrarily
+%% selected as the first node in the cluster.
+%%
+%% To coordinate the restarting nodes we pick a special node from the
+%% winning partition - the "winner". Restarting nodes then stop, and
+%% wait for it to tell them it is safe to start again. The winner
+%% determines that a node has stopped just by seeing if its rabbit app
+%% stops - if a node stops for any other reason it just gets a message
+%% it will ignore, and otherwise we carry on.
+%%
+%% Meanwhile, the leader may continue to receive new autoheal requests:
+%% all of them are ignored. The winner notifies the leader when the
+%% current autoheal process is finished (i.e. when all losers stopped and
+%% were asked to start again) or was aborted. When the leader receives
+%% the notification or if it loses contact with the winner, it can
+%% accept new autoheal requests.
+%%
+%% The winner and the leader are not necessarily the same node.
+%%
+%% The leader can be a loser and will restart in this case. It remembers
+%% there is an autoheal in progress by temporarily saving the autoheal
+%% state to the application environment.
+%%
+%% == Possible states ==
+%%
+%% not_healing
+%% - the default
+%%
+%% {winner_waiting, OutstandingStops, Notify}
+%% - we are the winner and are waiting for all losing nodes to stop
+%% before telling them they can restart
+%%
+%% {leader_waiting, Winner, Notify}
+%% - we are the leader, and have already assigned the winner and losers.
+%% We are waiting for a confirmation from the winner that the autoheal
+%% process has ended. Meanwhile we can ignore autoheal requests.
+%% Because we may be a loser too, this state is saved to the application
+%% environment and restored on startup.
+%%
+%% restarting
+%% - we are restarting. Of course the node monitor immediately dies
+%% then so this state does not last long. We therefore send the
+%% autoheal_safe_to_start message to the rabbit_outside_app_process
+%% instead.
+%%
+%% == Message flow ==
+%%
+%% 1. Any node (leader included) >> {request_start, node()} >> Leader
+%% When Mnesia detects it is running partitioned or
+%% when a remote node starts, rabbit_node_monitor calls
+%% rabbit_autoheal:maybe_start/1. The message above is sent to the
+%% leader so the leader can take a decision.
+%%
+%% 2. Leader >> {become_winner, Losers} >> Winner
+%% The leader notifies the winner so the latter can proceed with
+%% the autoheal.
+%%
+%% 3. Winner >> {winner_is, Winner} >> All losers
+%% The winner notifies losers they must stop.
+%%
+%% 4. Winner >> autoheal_safe_to_start >> All losers
+%% When either all losers stopped or the autoheal process was
+%% aborted, the winner notifies losers they can start again.
+%%
+%% 5. Leader >> report_autoheal_status >> Winner
+%% The leader asks the autoheal status to the winner. This only
+%% happens when the leader is a loser too. If this is not the case,
+%% this message is never sent.
+%%
+%% 6. Winner >> {autoheal_finished, Winner} >> Leader
+%% The winner notifies the leader that the autoheal process was
+%% either finished or aborted (i.e. autoheal_safe_to_start was sent
+%% to losers).
+
+%%----------------------------------------------------------------------------
+
+init() ->
+ %% We check the application environment for a saved autoheal state
+ %% saved during a restart. If this node is a leader, it is used
+ %% to determine if it needs to ask the winner to report about the
+ %% autoheal progress.
+ State = case application:get_env(rabbit, ?AUTOHEAL_STATE_AFTER_RESTART) of
+ {ok, S} -> S;
+ undefined -> not_healing
+ end,
+ ok = application:unset_env(rabbit, ?AUTOHEAL_STATE_AFTER_RESTART),
+ case State of
+ {leader_waiting, Winner, _} ->
+ rabbit_log:info(
+ "Autoheal: in progress, requesting report from ~p~n", [Winner]),
+ send(Winner, report_autoheal_status);
+ _ ->
+ ok
+ end,
+ State.
+
+maybe_start(not_healing) ->
+ case enabled() of
+ true -> Leader = leader(),
+ send(Leader, {request_start, node()}),
+ rabbit_log:info("Autoheal request sent to ~p~n", [Leader]),
+ not_healing;
+ false -> not_healing
+ end;
+maybe_start(State) ->
+ State.
+
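+%% Autoheal is driven by the cluster_partition_handling setting. Illustrative
+%% classic-config values that enable it:
+%%   {cluster_partition_handling, autoheal}
+%%   {cluster_partition_handling, {pause_if_all_down, Nodes, autoheal}}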
+enabled() ->
+ case application:get_env(rabbit, cluster_partition_handling) of
+ {ok, autoheal} -> true;
+ {ok, {pause_if_all_down, _, autoheal}} -> true;
+ _ -> false
+ end.
+
+leader() ->
+ [Leader | _] = lists:usort(rabbit_mnesia:cluster_nodes(all)),
+ Leader.
+
+%% This is the winner receiving its last notification that a node has
+%% stopped - all nodes can now start again
+rabbit_down(Node, {winner_waiting, [Node], Notify}) ->
+ rabbit_log:info("Autoheal: final node has stopped, starting...~n",[]),
+ winner_finish(Notify);
+
+rabbit_down(Node, {winner_waiting, WaitFor, Notify}) ->
+ {winner_waiting, WaitFor -- [Node], Notify};
+
+rabbit_down(Winner, {leader_waiting, Winner, Losers}) ->
+ abort([Winner], Losers);
+
+rabbit_down(_Node, State) ->
+ %% Ignore. Either:
+ %% o we already cancelled the autoheal process;
+ %% o we are still waiting the winner's report.
+ State.
+
+node_down(_Node, not_healing) ->
+ not_healing;
+
+node_down(Node, {winner_waiting, _, Notify}) ->
+ abort([Node], Notify);
+
+node_down(Node, {leader_waiting, Node, _Notify}) ->
+ %% The winner went down, we don't know what to do so we simply abort.
+ rabbit_log:info("Autoheal: aborting - winner ~p went down~n", [Node]),
+ not_healing;
+
+node_down(Node, {leader_waiting, _, _} = St) ->
+ %% If it is a partial partition, the winner might continue with the
+ %% healing process. If it is a full partition, the winner will also
+ %% see it and abort. Let's wait for it.
+ rabbit_log:info("Autoheal: ~p went down, waiting for winner decision ~n", [Node]),
+ St;
+
+node_down(Node, _State) ->
+ rabbit_log:info("Autoheal: aborting - ~p went down~n", [Node]),
+ not_healing.
+
+%% If the process that has to restart the node crashes for an unexpected reason,
+%% we go back to a not healing state so the node is able to recover.
+process_down({'EXIT', Pid, Reason}, {restarting, Pid}) when Reason =/= normal ->
+ rabbit_log:info("Autoheal: aborting - the process responsible for restarting the "
+ "node terminated with reason: ~p~n", [Reason]),
+ not_healing;
+
+process_down(_, State) ->
+ State.
+
+%% By receiving this message we become the leader
+%% TODO should we try to debounce this?
+handle_msg({request_start, Node},
+ not_healing, Partitions) ->
+ rabbit_log:info("Autoheal request received from ~p~n", [Node]),
+ case check_other_nodes(Partitions) of
+ {error, E} ->
+ rabbit_log:info("Autoheal request denied: ~s~n", [fmt_error(E)]),
+ not_healing;
+ {ok, AllPartitions} ->
+ {Winner, Losers} = make_decision(AllPartitions),
+ rabbit_log:info("Autoheal decision~n"
+ " * Partitions: ~p~n"
+ " * Winner: ~p~n"
+ " * Losers: ~p~n",
+ [AllPartitions, Winner, Losers]),
+ case node() =:= Winner of
+ true -> handle_msg({become_winner, Losers},
+ not_healing, Partitions);
+ false -> send(Winner, {become_winner, Losers}),
+ {leader_waiting, Winner, Losers}
+ end
+ end;
+
+handle_msg({request_start, Node},
+ State, _Partitions) ->
+ rabbit_log:info("Autoheal request received from ~p when healing; "
+ "ignoring~n", [Node]),
+ State;
+
+handle_msg({become_winner, Losers},
+ not_healing, _Partitions) ->
+ rabbit_log:info("Autoheal: I am the winner, waiting for ~p to stop~n",
+ [Losers]),
+ stop_partition(Losers);
+
+handle_msg({become_winner, Losers},
+ {winner_waiting, _, Losers}, _Partitions) ->
+ %% The leader has aborted the healing; it might have seen us down, but
+ %% we didn't see the same. Let's try again, as it is the same partition.
+ rabbit_log:info("Autoheal: I am the winner and received a duplicated "
+ "request, waiting again for ~p to stop~n", [Losers]),
+ stop_partition(Losers);
+
+handle_msg({become_winner, _},
+ {winner_waiting, _, Losers}, _Partitions) ->
+ %% Something has happened to the leader, it might have seen us down but we
+ %% are still alive. Partitions have changed, cannot continue.
+ rabbit_log:info("Autoheal: I am the winner and received another healing "
+ "request, partitions have changed to ~p. Aborting ~n", [Losers]),
+ winner_finish(Losers),
+ not_healing;
+
+handle_msg({winner_is, Winner}, State = not_healing,
+ _Partitions) ->
+ %% This node is a loser, nothing else.
+ Pid = restart_loser(State, Winner),
+ {restarting, Pid};
+handle_msg({winner_is, Winner}, State = {leader_waiting, Winner, _},
+ _Partitions) ->
+ %% This node is the leader and a loser at the same time.
+ Pid = restart_loser(State, Winner),
+ {restarting, Pid};
+
+handle_msg(Request, {restarting, Pid} = St, _Partitions) ->
+ %% ignore, we can contribute no further
+ rabbit_log:info("Autoheal: Received the request ~p while waiting for ~p "
+ "to restart the node. Ignoring it ~n", [Request, Pid]),
+ St;
+
+handle_msg(report_autoheal_status, not_healing, _Partitions) ->
+ %% The leader is asking about the autoheal status to us (the
+ %% winner). This happens when the leader is a loser and it just
+ %% restarted. We are in the "not_healing" state, so the previous
+ %% autoheal process ended: let's tell this to the leader.
+ send(leader(), {autoheal_finished, node()}),
+ not_healing;
+
+handle_msg(report_autoheal_status, State, _Partitions) ->
+ %% Like above, the leader is asking about the autoheal status. We
+ %% are not finished with it. There is no need to send anything yet
+ %% to the leader: we will send the notification when it is over.
+ State;
+
+handle_msg({autoheal_finished, Winner},
+ {leader_waiting, Winner, _}, _Partitions) ->
+ %% The winner is finished with the autoheal process and notified us
+ %% (the leader). We can transition to the "not_healing" state and
+ %% accept new requests.
+ rabbit_log:info("Autoheal finished according to winner ~p~n", [Winner]),
+ not_healing;
+
+handle_msg({autoheal_finished, Winner}, not_healing, _Partitions)
+ when Winner =:= node() ->
+ %% We are the leader and the winner. The state already transitioned
+ %% to "not_healing" at the end of the autoheal process.
+ rabbit_log:info("Autoheal finished according to winner ~p~n", [node()]),
+ not_healing;
+
+handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
+ %% We might have seen the winner down during a partial partition and
+ %% transitioned to not_healing. However, the winner was still able
+ %% to finish. Let it pass.
+ rabbit_log:info("Autoheal finished according to winner ~p."
+ " Unexpected, I might have previously seen the winner down~n", [Winner]),
+ not_healing.
+
+%%----------------------------------------------------------------------------
+
+send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}.
+
+abort(Down, Notify) ->
+ rabbit_log:info("Autoheal: aborting - ~p down~n", [Down]),
+ %% Make sure any nodes waiting for us start - it won't necessarily
+ %% heal the partition but at least they won't get stuck.
+ %% If we are executing this, we are not stopping. Thus, don't wait
+ %% for ourselves!
+ winner_finish(Notify -- [node()]).
+
+winner_finish(Notify) ->
+ %% There is a race in Mnesia causing a starting loser to hang
+ %% forever if another loser stops at the same time: the starting
+ %% node connects to the other node, negotiates the protocol and
+ %% attempts to acquire a write lock on the schema on the other node.
+ %% If the other node stops between the protocol negotiation and lock
+ %% request, the starting node never gets an answer to its lock
+ %% request.
+ %%
+ %% To work around the problem, we make sure Mnesia is stopped on all
+ %% losing nodes before sending the "autoheal_safe_to_start" signal.
+ wait_for_mnesia_shutdown(Notify),
+ [{rabbit_outside_app_process, N} ! autoheal_safe_to_start || N <- Notify],
+ send(leader(), {autoheal_finished, node()}),
+ not_healing.
+
+%% This improves on the previous implementation, but could still potentially enter an
+%% infinite loop. It is also possible that, by the time it finishes, some of the nodes
+%% have been manually restarted, but we can't do much more about that (apart from
+%% stopping them again). So let it continue and notify all the losers to restart.
+wait_for_mnesia_shutdown(AllNodes) ->
+ Monitors = lists:foldl(fun(Node, Monitors0) ->
+ pmon:monitor({mnesia_sup, Node}, Monitors0)
+ end, pmon:new(), AllNodes),
+ wait_for_supervisors(Monitors).
+
+wait_for_supervisors(Monitors) ->
+ case pmon:is_empty(Monitors) of
+ true ->
+ ok;
+ false ->
+ receive
+ {'DOWN', _MRef, process, {mnesia_sup, _} = I, _Reason} ->
+ wait_for_supervisors(pmon:erase(I, Monitors))
+ after
+ 60000 ->
+ AliveLosers = [Node || {_, Node} <- pmon:monitored(Monitors)],
+ rabbit_log:info("Autoheal: mnesia in nodes ~p is still up, sending "
+ "winner notification again to these ~n", [AliveLosers]),
+ [send(L, {winner_is, node()}) || L <- AliveLosers],
+ wait_for_mnesia_shutdown(AliveLosers)
+ end
+ end.
+
+restart_loser(State, Winner) ->
+ rabbit_log:warning(
+ "Autoheal: we were selected to restart; winner is ~p~n", [Winner]),
+ NextStateTimeout = application:get_env(rabbit, autoheal_state_transition_timeout, 60000),
+ rabbit_node_monitor:run_outside_applications(
+ fun () ->
+ MRef = erlang:monitor(process, {?SERVER, Winner}),
+ rabbit:stop(),
+ NextState = receive
+ {'DOWN', MRef, process, {?SERVER, Winner}, _Reason} ->
+ not_healing;
+ autoheal_safe_to_start ->
+ State
+ after NextStateTimeout ->
+ rabbit_log:warning(
+ "Autoheal: timed out waiting for a safe-to-start message from the winner (~p); will retry",
+ [Winner]),
+ not_healing
+ end,
+ erlang:demonitor(MRef, [flush]),
+ %% During the restart, the autoheal state is lost so we
+ %% store it in the application environment temporarily so
+ %% init/0 can pick it up.
+ %%
+ %% This is useful to the leader which is a loser at the
+ %% same time: because the leader is restarting, there
+ %% is a great chance it misses the "autoheal finished!"
+ %% notification from the winner. Thanks to the saved
+ %% state, it knows it needs to ask the winner if the
+ %% autoheal process is finished or not.
+ application:set_env(rabbit,
+ ?AUTOHEAL_STATE_AFTER_RESTART, NextState),
+ rabbit:start()
+ end, true).
+
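+%% The winning partition is the one with the most client connections,
+%% ties broken by partition size. Illustrative example: given partitions
+%% [[a, b], [c]] where [a, b] holds more connections, the result is
+%% {a, [c]}: node 'a' is the winner and 'c' is the only loser.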
+make_decision(AllPartitions) ->
+ Sorted = lists:sort([{partition_value(P), P} || P <- AllPartitions]),
+ [[Winner | _] | Rest] = lists:reverse([P || {_, P} <- Sorted]),
+ {Winner, lists:append(Rest)}.
+
+partition_value(Partition) ->
+ Connections = [Res || Node <- Partition,
+ Res <- [rpc:call(Node, rabbit_networking,
+ connections_local, [])],
+ is_list(Res)],
+ {length(lists:append(Connections)), length(Partition)}.
+
+%% We have our local understanding of what partitions exist; but we
+%% only know which nodes we have been partitioned from, not which
+%% nodes are partitioned from each other.
+check_other_nodes(LocalPartitions) ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ {Results, Bad} = rabbit_node_monitor:status(Nodes -- [node()]),
+ RemotePartitions = [{Node, proplists:get_value(partitions, Res)}
+ || {Node, Res} <- Results],
+ RemoteDown = [{Node, Down}
+ || {Node, Res} <- Results,
+ Down <- [Nodes -- proplists:get_value(nodes, Res)],
+ Down =/= []],
+ case {Bad, RemoteDown} of
+ {[], []} -> Partitions = [{node(), LocalPartitions} | RemotePartitions],
+ {ok, all_partitions(Partitions, [Nodes])};
+ {[], _} -> {error, {remote_down, RemoteDown}};
+ {_, _} -> {error, {nodes_down, Bad}}
+ end.
+
+all_partitions([], Partitions) ->
+ Partitions;
+all_partitions([{Node, CantSee} | Rest], Partitions) ->
+ {[Containing], Others} =
+ lists:partition(fun (Part) -> lists:member(Node, Part) end, Partitions),
+ A = Containing -- CantSee,
+ B = Containing -- A,
+ Partitions1 = case {A, B} of
+ {[], _} -> Partitions;
+ {_, []} -> Partitions;
+ _ -> [A, B | Others]
+ end,
+ all_partitions(Rest, Partitions1).
+
+fmt_error({remote_down, RemoteDown}) ->
+ rabbit_misc:format("Remote nodes disconnected:~n ~p", [RemoteDown]);
+fmt_error({nodes_down, NodesDown}) ->
+ rabbit_misc:format("Local nodes down: ~p", [NodesDown]).
+
+stop_partition(Losers) ->
+ %% The leader said everything was ready - do we agree? If not then
+ %% give up.
+ Down = Losers -- rabbit_node_monitor:alive_rabbit_nodes(Losers),
+ case Down of
+ [] -> [send(L, {winner_is, node()}) || L <- Losers],
+ {winner_waiting, Losers, Losers};
+ _ -> abort(Down, Losers)
+ end.
diff --git a/deps/rabbit/src/rabbit_backing_queue.erl b/deps/rabbit/src/rabbit_backing_queue.erl
new file mode 100644
index 0000000000..4d709e14d0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_backing_queue.erl
@@ -0,0 +1,264 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_backing_queue).
+
+-export([info_keys/0]).
+
+-define(INFO_KEYS, [messages_ram, messages_ready_ram,
+ messages_unacknowledged_ram, messages_persistent,
+ message_bytes, message_bytes_ready,
+ message_bytes_unacknowledged, message_bytes_ram,
+ message_bytes_persistent, head_message_timestamp,
+ disk_reads, disk_writes, backing_queue_status,
+ messages_paged_out, message_bytes_paged_out]).
+
+%% We can't specify a per-queue ack/state with callback signatures
+-type ack() :: any().
+-type state() :: any().
+
+-type flow() :: 'flow' | 'noflow'.
+-type msg_ids() :: [rabbit_types:msg_id()].
+-type publish() :: {rabbit_types:basic_message(),
+ rabbit_types:message_properties(), boolean()}.
+-type delivered_publish() :: {rabbit_types:basic_message(),
+ rabbit_types:message_properties()}.
+-type fetch_result(Ack) ::
+ ('empty' | {rabbit_types:basic_message(), boolean(), Ack}).
+-type drop_result(Ack) ::
+ ('empty' | {rabbit_types:msg_id(), Ack}).
+-type recovery_terms() :: [term()] | 'non_clean_shutdown'.
+-type recovery_info() :: 'new' | recovery_terms().
+-type purged_msg_count() :: non_neg_integer().
+-type async_callback() ::
+ fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok').
+-type duration() :: ('undefined' | 'infinity' | number()).
+
+-type msg_fun(A) :: fun ((rabbit_types:basic_message(), ack(), A) -> A).
+-type msg_pred() :: fun ((rabbit_types:message_properties()) -> boolean()).
+
+-type queue_mode() :: atom().
+
+%% Called on startup with a vhost and a list of durable queue names on this vhost.
+%% The queues aren't being started at this point, but this call allows the
+%% backing queue to perform any checking necessary for the consistency
+%% of those queues, or initialise any other shared resources.
+%%
+%% The list of queue recovery terms returned as {ok, Terms} must be given
+%% in the same order as the list of queue names supplied.
+-callback start(rabbit_types:vhost(), [rabbit_amqqueue:name()]) -> rabbit_types:ok(recovery_terms()).
+
+%% Called to tear down any state/resources for the vhost. NB: Implementations should
+%% not depend on this function being called on shutdown and instead
+%% should hook into the rabbit supervision hierarchy.
+-callback stop(rabbit_types:vhost()) -> 'ok'.
+
+%% Initialise the backing queue and its state.
+%%
+%% Takes
+%% 1. the amqqueue record
+%% 2. a term indicating whether the queue is an existing queue that
+%% should be recovered or not. When 'new' is given, no recovery is
+%% taking place, otherwise a list of recovery terms is given, or
+%% the atom 'non_clean_shutdown' if no recovery terms are available.
+%% 3. an asynchronous callback which accepts a function of type
+%% backing-queue-state to backing-queue-state. This callback
+%% function can be safely invoked from any process, which makes it
+%% useful for passing messages back into the backing queue,
+%% especially as the backing queue does not have control of its own
+%% mailbox.
+-callback init(amqqueue:amqqueue(), recovery_info(),
+ async_callback()) -> state().
+
+%% Called on queue shutdown when queue isn't being deleted.
+-callback terminate(any(), state()) -> state().
+
+%% Called when the queue is terminating and needs to delete all its
+%% content.
+-callback delete_and_terminate(any(), state()) -> state().
+
+%% Called to clean up after a crashed queue. In this case we don't
+%% have a process and thus a state(); we are just removing on-disk data.
+-callback delete_crashed(amqqueue:amqqueue()) -> 'ok'.
+
+%% Remove all 'fetchable' messages from the queue, i.e. all messages
+%% except those that have been fetched already and are pending acks.
+-callback purge(state()) -> {purged_msg_count(), state()}.
+
+%% Remove all messages in the queue which have been fetched and are
+%% pending acks.
+-callback purge_acks(state()) -> state().
+
+%% Publish a message.
+-callback publish(rabbit_types:basic_message(),
+ rabbit_types:message_properties(), boolean(), pid(), flow(),
+ state()) -> state().
+
+%% Like publish/6 but for batches of publishes.
+-callback batch_publish([publish()], pid(), flow(), state()) -> state().
+
+%% Called for messages which have already been passed straight
+%% out to a client. The queue will be empty for these calls
+%% (i.e. this saves the round trip through the backing queue).
+-callback publish_delivered(rabbit_types:basic_message(),
+ rabbit_types:message_properties(), pid(), flow(),
+ state())
+ -> {ack(), state()}.
+
+%% Like publish_delivered/5 but for batches of publishes.
+-callback batch_publish_delivered([delivered_publish()], pid(), flow(),
+ state())
+ -> {[ack()], state()}.
+
+%% Called to inform the BQ about messages which have reached the
+%% queue, but are not going to be further passed to the BQ.
+-callback discard(rabbit_types:msg_id(), pid(), flow(), state()) -> state().
+
+%% Return ids of messages which have been confirmed since the last
+%% invocation of this function (or initialisation).
+%%
+%% Message ids should only appear in the result of drain_confirmed
+%% under the following circumstances:
+%%
+%% 1. The message appears in a call to publish_delivered/4 and the
+%% first argument (ack_required) is false; or
+%% 2. The message is fetched from the queue with fetch/2 and the first
+%% argument (ack_required) is false; or
+%% 3. The message is acked (ack/2 is called for the message); or
+%% 4. The message is fully fsync'd to disk in such a way that the
+%% recovery of the message is guaranteed in the event of a crash of
+%% this rabbit node (excluding hardware failure).
+%%
+%% In addition to the above conditions, a message id may only appear
+%% in the result of drain_confirmed if
+%% #message_properties.needs_confirming = true when the msg was
+%% published (through whichever means) to the backing queue.
+%%
+%% It is legal for the same message id to appear in the results of
+%% multiple calls to drain_confirmed, which means that the backing
+%% queue is not required to keep track of which messages it has
+%% already confirmed. The confirm will be issued to the publisher the
+%% first time the message id appears in the result of
+%% drain_confirmed. All subsequent appearances of that message id will
+%% be ignored.
+-callback drain_confirmed(state()) -> {msg_ids(), state()}.
+
+%% Drop messages from the head of the queue while the supplied
+%% predicate on message properties returns true. Returns the first
+%% message properties for which the predicate returned false, or
+%% 'undefined' if the whole backing queue was traversed w/o the
+%% predicate ever returning false.
+-callback dropwhile(msg_pred(), state())
+ -> {rabbit_types:message_properties() | undefined, state()}.
+
+%% Like dropwhile, except messages are fetched in "require
+%% acknowledgement" mode and are passed, together with their ack tag,
+%% to the supplied function. The function is also fed an
+%% accumulator. The result of fetchwhile is as for dropwhile plus the
+%% accumulator.
+-callback fetchwhile(msg_pred(), msg_fun(A), A, state())
+ -> {rabbit_types:message_properties() | undefined,
+ A, state()}.
+
+%% Produce the next message.
+-callback fetch(true, state()) -> {fetch_result(ack()), state()};
+ (false, state()) -> {fetch_result(undefined), state()}.
+
+%% Remove the next message.
+-callback drop(true, state()) -> {drop_result(ack()), state()};
+ (false, state()) -> {drop_result(undefined), state()}.
+
+%% Acktags supplied are for messages which can now be forgotten
+%% about. Must return 1 msg_id per Ack, in the same order as Acks.
+-callback ack([ack()], state()) -> {msg_ids(), state()}.
+
+%% Reinsert messages into the queue which have already been delivered
+%% and were pending acknowledgement.
+-callback requeue([ack()], state()) -> {msg_ids(), state()}.
+
+%% Fold over messages by ack tag. The supplied function is called with
+%% each message, its ack tag, and an accumulator.
+-callback ackfold(msg_fun(A), A, state(), [ack()]) -> {A, state()}.
+
+%% Fold over all the messages in a queue and return the accumulated
+%% results, leaving the queue undisturbed.
+-callback fold(fun((rabbit_types:basic_message(),
+ rabbit_types:message_properties(),
+ boolean(), A) -> {('stop' | 'cont'), A}),
+ A, state()) -> {A, state()}.
+
+%% How long is my queue?
+-callback len(state()) -> non_neg_integer().
+
+%% Is my queue empty?
+-callback is_empty(state()) -> boolean().
+
+%% What's the queue depth, where depth = length + number of pending acks
+-callback depth(state()) -> non_neg_integer().
+
+%% For the next three functions, the assumption is that you're
+%% monitoring something like the ingress and egress rates of the
+%% queue. The RAM duration is thus the length of time represented by
+%% the messages held in RAM given the current rates. If you want to
+%% ignore all of this stuff, then do so, and return 0 in
+%% ram_duration/1.
+
+%% The target is to have no more messages in RAM than indicated by the
+%% duration and the current queue rates.
+-callback set_ram_duration_target(duration(), state()) -> state().
+
+%% Optionally recalculate the duration internally (likely to be just
+%% update your internal rates), and report how many seconds the
+%% messages in RAM represent given the current rates of the queue.
+-callback ram_duration(state()) -> {duration(), state()}.
+
+%% Should 'timeout' be called as soon as the queue process can manage
+%% (either on an empty mailbox, or when a timer fires)?
+-callback needs_timeout(state()) -> 'false' | 'timed' | 'idle'.
+
+%% Called (eventually) after needs_timeout returns 'idle' or 'timed'.
+%% Note this may be called more than once for each 'idle' or 'timed'
+%% returned from needs_timeout.
+-callback timeout(state()) -> state().
+
+%% Called immediately before the queue hibernates.
+-callback handle_pre_hibernate(state()) -> state().
+
+%% Called when more credit has become available for credit_flow.
+-callback resume(state()) -> state().
+
+%% Used to help prioritisation in rabbit_amqqueue_process. Returns the
+%% current rates of inbound and outbound messages.
+-callback msg_rates(state()) -> {float(), float()}.
+
+-callback info(atom(), state()) -> any().
+
+%% Passed a function to be invoked with the relevant backing queue's
+%% state. Useful for when the backing queue or other components need
+%% to pass functions into the backing queue.
+-callback invoke(atom(), fun ((atom(), A) -> A), state()) -> state().
+
+%% Called prior to a publish or publish_delivered call. Allows the BQ
+%% to signal that it has already seen this message (e.g. it was published
+%% or discarded previously), specifying whether to drop the message or reject it.
+-callback is_duplicate(rabbit_types:basic_message(), state())
+ -> {{true, drop} | {true, reject} | boolean(), state()}.
+
+-callback set_queue_mode(queue_mode(), state()) -> state().
+
+-callback zip_msgs_and_acks([delivered_publish()],
+ [ack()], Acc, state())
+ -> Acc.
+
+%% Called when rabbit_amqqueue_process receives a message via
+%% handle_info and it should be processed by the backing
+%% queue
+-callback handle_info(term(), state()) -> state().
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS.
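+
+%% Purely illustrative sketch (not part of this behaviour module): a
+%% hypothetical, minimal backing queue that keeps its state as a plain list,
+%% showing the shape of a few of the callbacks above. The module name is
+%% made up, and a real implementation must provide every callback:
+%%
+%%   -module(rabbit_bq_list_example).
+%%   -behaviour(rabbit_backing_queue).
+%%
+%%   init(_Q, _Recovery, _AsyncCallback) -> [].
+%%
+%%   publish(Msg, Props, _Delivered, _Pid, _Flow, State) ->
+%%       State ++ [{Msg, Props}].
+%%
+%%   fetch(false, []) -> {empty, []};
+%%   fetch(false, [{Msg, _Props} | Rest]) -> {{Msg, false, undefined}, Rest}.
+%%
+%%   len(State) -> length(State).
+%%
+%%   is_empty(State) -> State =:= [].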
diff --git a/deps/rabbit/src/rabbit_basic.erl b/deps/rabbit/src/rabbit_basic.erl
new file mode 100644
index 0000000000..cdc9e082e4
--- /dev/null
+++ b/deps/rabbit/src/rabbit_basic.erl
@@ -0,0 +1,354 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_basic).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-export([publish/4, publish/5, publish/1,
+ message/3, message/4, properties/1, prepend_table_header/3,
+ extract_headers/1, extract_timestamp/1, map_headers/2, delivery/4,
+ header_routes/1, parse_expiration/1, header/2, header/3]).
+-export([build_content/2, from_content/1, msg_size/1,
+ maybe_gc_large_msg/1, maybe_gc_large_msg/2]).
+-export([add_header/4,
+ peek_fmt_message/1]).
+
+%%----------------------------------------------------------------------------
+
+-type properties_input() ::
+ rabbit_framing:amqp_property_record() | [{atom(), any()}].
+-type publish_result() ::
+ ok | rabbit_types:error('not_found').
+-type header() :: any().
+-type headers() :: rabbit_framing:amqp_table() | 'undefined'.
+
+-type exchange_input() :: rabbit_types:exchange() | rabbit_exchange:name().
+-type body_input() :: binary() | [binary()].
+
+%%----------------------------------------------------------------------------
+
+%% Convenience function for avoiding round-trips in calls across the
+%% Erlang distributed network.
+
+-spec publish
+ (exchange_input(), rabbit_router:routing_key(), properties_input(),
+ body_input()) ->
+ publish_result().
+
+publish(Exchange, RoutingKeyBin, Properties, Body) ->
+ publish(Exchange, RoutingKeyBin, false, Properties, Body).
+
+%% Convenience function for avoiding round-trips in calls across the
+%% Erlang distributed network.
+
+-spec publish
+ (exchange_input(), rabbit_router:routing_key(), boolean(),
+ properties_input(), body_input()) ->
+ publish_result().
+
+publish(X = #exchange{name = XName}, RKey, Mandatory, Props, Body) ->
+ Message = message(XName, RKey, properties(Props), Body),
+ publish(X, delivery(Mandatory, false, Message, undefined));
+publish(XName, RKey, Mandatory, Props, Body) ->
+ Message = message(XName, RKey, properties(Props), Body),
+ publish(delivery(Mandatory, false, Message, undefined)).
+
+-spec publish(rabbit_types:delivery()) -> publish_result().
+
+publish(Delivery = #delivery{
+ message = #basic_message{exchange_name = XName}}) ->
+ case rabbit_exchange:lookup(XName) of
+ {ok, X} -> publish(X, Delivery);
+ Err -> Err
+ end.
+
+publish(X, Delivery) ->
+ Qs = rabbit_amqqueue:lookup(rabbit_exchange:route(X, Delivery)),
+ _ = rabbit_queue_type:deliver(Qs, Delivery, stateless),
+ ok.
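+
+%% Illustrative usage only (exchange and routing key are made up): publishing
+%% a message from within the broker to an exchange identified by name, letting
+%% properties/1 below fill in a #'P_basic'{} record:
+%%
+%%   XName = rabbit_misc:r(<<"/">>, exchange, <<"amq.direct">>),
+%%   rabbit_basic:publish(XName, <<"some.key">>,
+%%                        [{delivery_mode, 1}], <<"hello">>).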
+
+-spec delivery
+ (boolean(), boolean(), rabbit_types:message(), undefined | integer()) ->
+ rabbit_types:delivery().
+
+delivery(Mandatory, Confirm, Message, MsgSeqNo) ->
+ #delivery{mandatory = Mandatory, confirm = Confirm, sender = self(),
+ message = Message, msg_seq_no = MsgSeqNo, flow = noflow}.
+
+-spec build_content
+ (rabbit_framing:amqp_property_record(), binary() | [binary()]) ->
+ rabbit_types:content().
+
+build_content(Properties, BodyBin) when is_binary(BodyBin) ->
+ build_content(Properties, [BodyBin]);
+
+build_content(Properties, PFR) ->
+ %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
+ {ClassId, _MethodId} =
+ rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
+ #content{class_id = ClassId,
+ properties = Properties,
+ properties_bin = none,
+ protocol = none,
+ payload_fragments_rev = PFR}.
+
+-spec from_content
+ (rabbit_types:content()) ->
+ {rabbit_framing:amqp_property_record(), binary()}.
+
+from_content(Content) ->
+ #content{class_id = ClassId,
+ properties = Props,
+ payload_fragments_rev = FragmentsRev} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
+ {ClassId, _MethodId} =
+ rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
+ {Props, list_to_binary(lists:reverse(FragmentsRev))}.
+
+%% This breaks the spec rule forbidding message modification
+strip_header(#content{properties = #'P_basic'{headers = undefined}}
+ = DecodedContent, _Key) ->
+ DecodedContent;
+strip_header(#content{properties = Props = #'P_basic'{headers = Headers}}
+ = DecodedContent, Key) ->
+ case lists:keysearch(Key, 1, Headers) of
+ false -> DecodedContent;
+ {value, Found} -> Headers0 = lists:delete(Found, Headers),
+ rabbit_binary_generator:clear_encoded_content(
+ DecodedContent#content{
+ properties = Props#'P_basic'{
+ headers = Headers0}})
+ end.
+
+-spec message
+ (rabbit_exchange:name(), rabbit_router:routing_key(),
+ rabbit_types:decoded_content()) ->
+ rabbit_types:ok_or_error2(rabbit_types:message(), any()).
+
+message(XName, RoutingKey, #content{properties = Props} = DecodedContent) ->
+ try
+ {ok, #basic_message{
+ exchange_name = XName,
+ content = strip_header(DecodedContent, ?DELETED_HEADER),
+ id = rabbit_guid:gen(),
+ is_persistent = is_message_persistent(DecodedContent),
+ routing_keys = [RoutingKey |
+ header_routes(Props#'P_basic'.headers)]}}
+ catch
+ {error, _Reason} = Error -> Error
+ end.
+
+-spec message
+ (rabbit_exchange:name(), rabbit_router:routing_key(), properties_input(),
+ binary()) ->
+ rabbit_types:message().
+
+message(XName, RoutingKey, RawProperties, Body) ->
+ Properties = properties(RawProperties),
+ Content = build_content(Properties, Body),
+ {ok, Msg} = message(XName, RoutingKey, Content),
+ Msg.
+
+-spec properties
+ (properties_input()) -> rabbit_framing:amqp_property_record().
+
+properties(P = #'P_basic'{}) ->
+ P;
+properties(P) when is_list(P) ->
+ %% Yes, this is O(length(P) * record_info(size, 'P_basic') / 2),
+ %% i.e. slow. Use the definition of 'P_basic' directly if
+ %% possible!
+ lists:foldl(fun ({Key, Value}, Acc) ->
+ case indexof(record_info(fields, 'P_basic'), Key) of
+ 0 -> throw({unknown_basic_property, Key});
+ N -> setelement(N + 1, Acc, Value)
+ end
+ end, #'P_basic'{}, P).
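+
+%% Illustrative usage (made-up values): the list form above builds a
+%% #'P_basic'{} record field by field, e.g.
+%%
+%%   Props = rabbit_basic:properties([{content_type, <<"text/plain">>},
+%%                                    {delivery_mode, 2}]),
+%%   #'P_basic'{content_type = <<"text/plain">>, delivery_mode = 2} = Props.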
+
+-spec prepend_table_header
+ (binary(), rabbit_framing:amqp_table(), headers()) -> headers().
+
+prepend_table_header(Name, Info, undefined) ->
+ prepend_table_header(Name, Info, []);
+prepend_table_header(Name, Info, Headers) ->
+ case rabbit_misc:table_lookup(Headers, Name) of
+ {array, Existing} ->
+ prepend_table(Name, Info, Existing, Headers);
+ undefined ->
+ prepend_table(Name, Info, [], Headers);
+ Other ->
+ Headers2 = prepend_table(Name, Info, [], Headers),
+ set_invalid_header(Name, Other, Headers2)
+ end.
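+
+%% Illustrative example (made-up values): prepending an entry to an
+%% array-valued header such as "x-death" when no headers exist yet:
+%%
+%%   Headers = rabbit_basic:prepend_table_header(
+%%               <<"x-death">>,
+%%               [{<<"reason">>, longstr, <<"expired">>}],
+%%               undefined),
+%%   {array, [{table, _}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>).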
+
+prepend_table(Name, Info, Prior, Headers) ->
+ rabbit_misc:set_table_value(Headers, Name, array, [{table, Info} | Prior]).
+
+set_invalid_header(Name, {_, _}=Value, Headers) when is_list(Headers) ->
+ case rabbit_misc:table_lookup(Headers, ?INVALID_HEADERS_KEY) of
+ undefined ->
+ set_invalid([{Name, array, [Value]}], Headers);
+ {table, ExistingHdr} ->
+ update_invalid(Name, Value, ExistingHdr, Headers);
+ Other ->
+ %% somehow the x-invalid-headers header is corrupt
+ Invalid = [{?INVALID_HEADERS_KEY, array, [Other]}],
+ set_invalid_header(Name, Value, set_invalid(Invalid, Headers))
+ end.
+
+set_invalid(NewHdr, Headers) ->
+ rabbit_misc:set_table_value(Headers, ?INVALID_HEADERS_KEY, table, NewHdr).
+
+update_invalid(Name, Value, ExistingHdr, Header) ->
+ Values = case rabbit_misc:table_lookup(ExistingHdr, Name) of
+ undefined -> [Value];
+ {array, Prior} -> [Value | Prior]
+ end,
+ NewHdr = rabbit_misc:set_table_value(ExistingHdr, Name, array, Values),
+ set_invalid(NewHdr, Header).
+
+-spec header(header(), headers()) -> 'undefined' | any().
+
+header(_Header, undefined) ->
+ undefined;
+header(_Header, []) ->
+ undefined;
+header(Header, Headers) ->
+ header(Header, Headers, undefined).
+
+-spec header(header(), headers(), any()) -> 'undefined' | any().
+
+header(Header, Headers, Default) ->
+ case lists:keysearch(Header, 1, Headers) of
+ false -> Default;
+ {value, Val} -> Val
+ end.
+
+-spec extract_headers(rabbit_types:content()) -> headers().
+
+extract_headers(Content) ->
+ #content{properties = #'P_basic'{headers = Headers}} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ Headers.
+
+extract_timestamp(Content) ->
+ #content{properties = #'P_basic'{timestamp = Timestamp}} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ Timestamp.
+
+-spec map_headers
+ (fun((headers()) -> headers()), rabbit_types:content()) ->
+ rabbit_types:content().
+
+map_headers(F, Content) ->
+ Content1 = rabbit_binary_parser:ensure_content_decoded(Content),
+ #content{properties = #'P_basic'{headers = Headers} = Props} = Content1,
+ Headers1 = F(Headers),
+ rabbit_binary_generator:clear_encoded_content(
+ Content1#content{properties = Props#'P_basic'{headers = Headers1}}).
+
+indexof(L, Element) -> indexof(L, Element, 1).
+
+indexof([], _Element, _N) -> 0;
+indexof([Element | _Rest], Element, N) -> N;
+indexof([_ | Rest], Element, N) -> indexof(Rest, Element, N + 1).
+
+is_message_persistent(#content{properties = #'P_basic'{
+ delivery_mode = Mode}}) ->
+ case Mode of
+ 1 -> false;
+ 2 -> true;
+ undefined -> false;
+ Other -> throw({error, {delivery_mode_unknown, Other}})
+ end.
+
+%% Extract CC routes from headers
+
+-spec header_routes(undefined | rabbit_framing:amqp_table()) -> [string()].
+
+header_routes(undefined) ->
+ [];
+header_routes(HeadersTable) ->
+ lists:append(
+ [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of
+ {array, Routes} -> [Route || {longstr, Route} <- Routes];
+ undefined -> [];
+ {Type, _Val} -> throw({error, {unacceptable_type_in_header,
+ binary_to_list(HeaderKey), Type}})
+ end || HeaderKey <- ?ROUTING_HEADERS]).
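+
+%% Illustrative example (made-up values): a "CC" header adds extra routing
+%% keys for the message, which header_routes/1 extracts:
+%%
+%%   Headers = [{<<"CC">>, array, [{longstr, <<"extra.key">>}]}],
+%%   [<<"extra.key">>] = rabbit_basic:header_routes(Headers).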
+
+-spec parse_expiration
+ (rabbit_framing:amqp_property_record()) ->
+ rabbit_types:ok_or_error2('undefined' | non_neg_integer(), any()).
+
+parse_expiration(#'P_basic'{expiration = undefined}) ->
+ {ok, undefined};
+parse_expiration(#'P_basic'{expiration = Expiration}) ->
+ case string:to_integer(binary_to_list(Expiration)) of
+ {error, no_integer} = E ->
+ E;
+ {N, ""} ->
+ case rabbit_misc:check_expiry(N) of
+ ok -> {ok, N};
+ E = {error, _} -> E
+ end;
+ {_, S} ->
+ {error, {leftover_string, S}}
+ end.
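+
+%% Illustrative examples (made-up values): the expiration property is a
+%% string of milliseconds, so
+%%
+%%   {ok, 60000} = rabbit_basic:parse_expiration(
+%%                   #'P_basic'{expiration = <<"60000">>}),
+%%   {error, {leftover_string, "s"}} = rabbit_basic:parse_expiration(
+%%                                       #'P_basic'{expiration = <<"60s">>}).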
+
+maybe_gc_large_msg(Content) ->
+ rabbit_writer:maybe_gc_large_msg(Content).
+
+maybe_gc_large_msg(Content, undefined) ->
+ rabbit_writer:msg_size(Content);
+maybe_gc_large_msg(Content, GCThreshold) ->
+ rabbit_writer:maybe_gc_large_msg(Content, GCThreshold).
+
+msg_size(Content) ->
+ rabbit_writer:msg_size(Content).
+
+add_header(Name, Type, Value, #basic_message{content = Content0} = Msg) ->
+ Content = rabbit_basic:map_headers(
+ fun(undefined) ->
+ rabbit_misc:set_table_value([], Name, Type, Value);
+ (Headers) ->
+ rabbit_misc:set_table_value(Headers, Name, Type, Value)
+ end, Content0),
+ Msg#basic_message{content = Content}.
+
+peek_fmt_message(#basic_message{exchange_name = Ex,
+ routing_keys = RKeys,
+ content =
+ #content{payload_fragments_rev = Payl0,
+ properties = Props}}) ->
+ Fields = [atom_to_binary(F, utf8) || F <- record_info(fields, 'P_basic')],
+ T = lists:zip(Fields, tl(tuple_to_list(Props))),
+ lists:foldl(
+ fun ({<<"headers">>, Hdrs}, Acc) ->
+ case Hdrs of
+ [] ->
+ Acc;
+ _ ->
+ Acc ++ [{header_key(H), V} || {H, _T, V} <- Hdrs]
+ end;
+ ({_, undefined}, Acc) ->
+ Acc;
+ (KV, Acc) ->
+ [KV | Acc]
+ end, [], [{<<"payload (max 64 bytes)">>,
+ %% restrict payload to 64 bytes
+ binary_prefix_64(iolist_to_binary(lists:reverse(Payl0)), 64)},
+ {<<"exchange">>, Ex#resource.name},
+ {<<"routing_keys">>, RKeys} | T]).
+
+header_key(A) ->
+ <<"header.", A/binary>>.
+
+binary_prefix_64(Bin, Len) ->
+ binary:part(Bin, 0, min(byte_size(Bin), Len)).
diff --git a/deps/rabbit/src/rabbit_binding.erl b/deps/rabbit/src/rabbit_binding.erl
new file mode 100644
index 0000000000..6ef25c4e60
--- /dev/null
+++ b/deps/rabbit/src/rabbit_binding.erl
@@ -0,0 +1,691 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_binding).
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([recover/0, recover/2, exists/1, add/2, add/3, remove/1, remove/2, remove/3, remove/4]).
+-export([list/1, list_for_source/1, list_for_destination/1,
+ list_for_source_and_destination/2, list_explicit/0]).
+-export([new_deletions/0, combine_deletions/2, add_deletion/3,
+ process_deletions/2, binding_action/3]).
+-export([info_keys/0, info/1, info/2, info_all/1, info_all/2, info_all/4]).
+%% these must all be run inside a mnesia tx
+-export([has_for_source/1, remove_for_source/1,
+ remove_for_destination/2, remove_transient_for_destination/1,
+ remove_default_exchange_binding_rows_of/1]).
+
+-export([implicit_for_destination/1, reverse_binding/1]).
+-export([new/4]).
+
+-define(DEFAULT_EXCHANGE(VHostPath), #resource{virtual_host = VHostPath,
+ kind = exchange,
+ name = <<>>}).
+
+%%----------------------------------------------------------------------------
+
+-export_type([key/0, deletions/0]).
+
+-type key() :: binary().
+
+-type bind_errors() :: rabbit_types:error(
+ {'resources_missing',
+ [{'not_found', (rabbit_types:binding_source() |
+ rabbit_types:binding_destination())} |
+ {'absent', amqqueue:amqqueue()}]}).
+
+-type bind_ok_or_error() :: 'ok' | bind_errors() |
+ rabbit_types:error(
+ {'binding_invalid', string(), [any()]}).
+-type bind_res() :: bind_ok_or_error() | rabbit_misc:thunk(bind_ok_or_error()).
+-type inner_fun() ::
+ fun((rabbit_types:exchange(),
+ rabbit_types:exchange() | amqqueue:amqqueue()) ->
+ rabbit_types:ok_or_error(rabbit_types:amqp_error())).
+-type bindings() :: [rabbit_types:binding()].
+
+%% TODO this should really be opaque but that seems to confuse 17.1's
+%% dialyzer into objecting to everything that uses it.
+-type deletions() :: dict:dict().
+
+%%----------------------------------------------------------------------------
+
+-spec new(rabbit_types:exchange(),
+ key(),
+ rabbit_types:exchange() | amqqueue:amqqueue(),
+ rabbit_framing:amqp_table()) ->
+ rabbit_types:binding().
+
+new(Src, RoutingKey, Dst, #{}) ->
+ new(Src, RoutingKey, Dst, []);
+new(Src, RoutingKey, Dst, Arguments) when is_map(Arguments) ->
+ new(Src, RoutingKey, Dst, maps:to_list(Arguments));
+new(Src, RoutingKey, Dst, Arguments) ->
+ #binding{source = Src, key = RoutingKey, destination = Dst, args = Arguments}.
+
+
+-define(INFO_KEYS, [source_name, source_kind,
+ destination_name, destination_kind,
+ routing_key, arguments,
+ vhost]).
+
+%% Global table recovery
+
+-spec recover([rabbit_exchange:name()], [rabbit_amqqueue:name()]) ->
+ 'ok'.
+
+recover() ->
+ rabbit_misc:table_filter(
+ fun (Route) ->
+ mnesia:read({rabbit_semi_durable_route, Route}) =:= []
+ end,
+ fun (Route, true) ->
+ ok = mnesia:write(rabbit_semi_durable_route, Route, write);
+ (_Route, false) ->
+ ok
+ end, rabbit_durable_route).
+
+%% Virtual host-specific recovery
+recover(XNames, QNames) ->
+ XNameSet = sets:from_list(XNames),
+ QNameSet = sets:from_list(QNames),
+ SelectSet = fun (#resource{kind = exchange}) -> XNameSet;
+ (#resource{kind = queue}) -> QNameSet
+ end,
+ {ok, Gatherer} = gatherer:start_link(),
+ [recover_semi_durable_route(Gatherer, R, SelectSet(Dst)) ||
+ R = #route{binding = #binding{destination = Dst}} <-
+ rabbit_misc:dirty_read_all(rabbit_semi_durable_route)],
+ empty = gatherer:out(Gatherer),
+ ok = gatherer:stop(Gatherer),
+ ok.
+
+recover_semi_durable_route(Gatherer, R = #route{binding = B}, ToRecover) ->
+ #binding{source = Src, destination = Dst} = B,
+ case sets:is_element(Dst, ToRecover) of
+ true -> {ok, X} = rabbit_exchange:lookup(Src),
+ ok = gatherer:fork(Gatherer),
+ ok = worker_pool:submit_async(
+ fun () ->
+ recover_semi_durable_route_txn(R, X),
+ gatherer:finish(Gatherer)
+ end);
+ false -> ok
+ end.
+
+recover_semi_durable_route_txn(R = #route{binding = B}, X) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:read(rabbit_semi_durable_route, B, read) of
+ [] -> no_recover;
+ _ -> ok = sync_transient_route(R, fun mnesia:write/3),
+ rabbit_exchange:serial(X)
+ end
+ end,
+ fun (no_recover, _) -> ok;
+ (_Serial, true) -> x_callback(transaction, X, add_binding, B);
+ (Serial, false) -> x_callback(Serial, X, add_binding, B)
+ end).
+
+-spec exists(rabbit_types:binding()) -> boolean() | bind_errors().
+
+exists(#binding{source = ?DEFAULT_EXCHANGE(_),
+ destination = #resource{kind = queue, name = QName} = Queue,
+ key = QName,
+ args = []}) ->
+ case rabbit_amqqueue:lookup(Queue) of
+ {ok, _} -> true;
+ {error, not_found} -> false
+ end;
+exists(Binding) ->
+ binding_action(
+ Binding, fun (_Src, _Dst, B) ->
+ rabbit_misc:const(mnesia:read({rabbit_route, B}) /= [])
+ end, fun not_found_or_absent_errs/1).
+
+-spec add(rabbit_types:binding(), rabbit_types:username()) -> bind_res().
+
+add(Binding, ActingUser) -> add(Binding, fun (_Src, _Dst) -> ok end, ActingUser).
+
+-spec add(rabbit_types:binding(), inner_fun(), rabbit_types:username()) -> bind_res().
+
+add(Binding, InnerFun, ActingUser) ->
+ binding_action(
+ Binding,
+ fun (Src, Dst, B) ->
+ case rabbit_exchange:validate_binding(Src, B) of
+ ok ->
+ lock_resource(Src, read),
+ lock_resource(Dst, read),
+ %% this argument is used to check queue exclusivity;
+ %% in general, we want to fail on that in preference to
+ %% anything else
+ case InnerFun(Src, Dst) of
+ ok ->
+ case mnesia:read({rabbit_route, B}) of
+ [] -> add(Src, Dst, B, ActingUser);
+ [_] -> fun () -> ok end
+ end;
+ {error, _} = Err ->
+ rabbit_misc:const(Err)
+ end;
+ {error, _} = Err ->
+ rabbit_misc:const(Err)
+ end
+ end, fun not_found_or_absent_errs/1).
+
+add(Src, Dst, B, ActingUser) ->
+ [SrcDurable, DstDurable] = [durable(E) || E <- [Src, Dst]],
+ ok = sync_route(#route{binding = B}, SrcDurable, DstDurable,
+ fun mnesia:write/3),
+ x_callback(transaction, Src, add_binding, B),
+ Serial = rabbit_exchange:serial(Src),
+ fun () ->
+ x_callback(Serial, Src, add_binding, B),
+ ok = rabbit_event:notify(
+ binding_created,
+ info(B) ++ [{user_who_performed_action, ActingUser}])
+ end.
+
+-spec remove(rabbit_types:binding()) -> bind_res().
+remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end, ?INTERNAL_USER).
+
+-spec remove(rabbit_types:binding(), rabbit_types:username()) -> bind_res().
+remove(Binding, ActingUser) -> remove(Binding, fun (_Src, _Dst) -> ok end, ActingUser).
+
+
+-spec remove(rabbit_types:binding(), inner_fun(), rabbit_types:username()) -> bind_res().
+remove(Binding, InnerFun, ActingUser) ->
+ binding_action(
+ Binding,
+ fun (Src, Dst, B) ->
+ lock_resource(Src, read),
+ lock_resource(Dst, read),
+ case mnesia:read(rabbit_route, B, write) of
+ [] -> case mnesia:read(rabbit_durable_route, B, write) of
+ [] -> rabbit_misc:const(ok);
+ %% We still delete the binding and run
+ %% all post-delete functions if there is only
+ %% a durable route in the database
+ _ -> remove(Src, Dst, B, ActingUser)
+ end;
+ _ -> case InnerFun(Src, Dst) of
+ ok -> remove(Src, Dst, B, ActingUser);
+ {error, _} = Err -> rabbit_misc:const(Err)
+ end
+ end
+ end, fun absent_errs_only/1).
+
+remove(Src, Dst, B, ActingUser) ->
+ ok = sync_route(#route{binding = B}, durable(Src), durable(Dst),
+ fun delete/3),
+ Deletions = maybe_auto_delete(
+ B#binding.source, [B], new_deletions(), false),
+ process_deletions(Deletions, ActingUser).
+
+%% Default exchange bindings are implicit as of rabbitmq/rabbitmq-server#1721,
+%% so any rows still stored for them are removed here.
+remove_default_exchange_binding_rows_of(Dst = #resource{}) ->
+ case implicit_for_destination(Dst) of
+ [Binding] ->
+ mnesia:dirty_delete(rabbit_durable_route, Binding),
+ mnesia:dirty_delete(rabbit_semi_durable_route, Binding),
+ mnesia:dirty_delete(rabbit_reverse_route,
+ reverse_binding(Binding)),
+ mnesia:dirty_delete(rabbit_route, Binding);
+ _ ->
+ %% no binding to remove or
+ %% a competing tx has beaten us to it?
+ ok
+ end,
+ ok.
+
+-spec list_explicit() -> bindings().
+
+list_explicit() ->
+ mnesia:async_dirty(
+ fun () ->
+ AllRoutes = mnesia:dirty_match_object(rabbit_route, #route{_ = '_'}),
+ %% if there are any default exchange bindings left after an upgrade
+ %% of a pre-3.8 database, filter them out
+ AllBindings = [B || #route{binding = B} <- AllRoutes],
+ lists:filter(fun(#binding{source = S}) ->
+ not (S#resource.kind =:= exchange andalso S#resource.name =:= <<>>)
+ end, AllBindings)
+ end).
+
+-spec list(rabbit_types:vhost()) -> bindings().
+
+list(VHostPath) ->
+ VHostResource = rabbit_misc:r(VHostPath, '_'),
+ Route = #route{binding = #binding{source = VHostResource,
+ destination = VHostResource,
+ _ = '_'},
+ _ = '_'},
+ %% if there are any default exchange bindings left after an upgrade
+ %% of a pre-3.8 database, filter them out
+ AllBindings = [B || #route{binding = B} <- mnesia:dirty_match_object(rabbit_route,
+ Route)],
+ Filtered = lists:filter(fun(#binding{source = S}) ->
+ S =/= ?DEFAULT_EXCHANGE(VHostPath)
+ end, AllBindings),
+ implicit_bindings(VHostPath) ++ Filtered.
+
+-spec list_for_source
+ (rabbit_types:binding_source()) -> bindings().
+
+list_for_source(?DEFAULT_EXCHANGE(VHostPath)) ->
+ implicit_bindings(VHostPath);
+list_for_source(SrcName) ->
+ mnesia:async_dirty(
+ fun() ->
+ Route = #route{binding = #binding{source = SrcName, _ = '_'}},
+ [B || #route{binding = B}
+ <- mnesia:match_object(rabbit_route, Route, read)]
+ end).
+
+-spec list_for_destination
+ (rabbit_types:binding_destination()) -> bindings().
+
+list_for_destination(DstName = #resource{virtual_host = VHostPath}) ->
+ AllBindings = mnesia:async_dirty(
+ fun() ->
+ Route = #route{binding = #binding{destination = DstName,
+ _ = '_'}},
+ [reverse_binding(B) ||
+ #reverse_route{reverse_binding = B} <-
+ mnesia:match_object(rabbit_reverse_route,
+ reverse_route(Route), read)]
+ end),
+ Filtered = lists:filter(fun(#binding{source = S}) ->
+ S =/= ?DEFAULT_EXCHANGE(VHostPath)
+ end, AllBindings),
+ implicit_for_destination(DstName) ++ Filtered.
+
+implicit_bindings(VHostPath) ->
+ DstQueues = rabbit_amqqueue:list_names(VHostPath),
+ [ #binding{source = ?DEFAULT_EXCHANGE(VHostPath),
+ destination = DstQueue,
+ key = QName,
+ args = []}
+ || DstQueue = #resource{name = QName} <- DstQueues ].
+
+implicit_for_destination(DstQueue = #resource{kind = queue,
+ virtual_host = VHostPath,
+ name = QName}) ->
+ [#binding{source = ?DEFAULT_EXCHANGE(VHostPath),
+ destination = DstQueue,
+ key = QName,
+ args = []}];
+implicit_for_destination(_) ->
+ [].
+
+-spec list_for_source_and_destination
+ (rabbit_types:binding_source(), rabbit_types:binding_destination()) ->
+ bindings().
+
+list_for_source_and_destination(?DEFAULT_EXCHANGE(VHostPath),
+ #resource{kind = queue,
+ virtual_host = VHostPath,
+ name = QName} = DstQueue) ->
+ [#binding{source = ?DEFAULT_EXCHANGE(VHostPath),
+ destination = DstQueue,
+ key = QName,
+ args = []}];
+list_for_source_and_destination(SrcName, DstName) ->
+ mnesia:async_dirty(
+ fun() ->
+ Route = #route{binding = #binding{source = SrcName,
+ destination = DstName,
+ _ = '_'}},
+ [B || #route{binding = B} <- mnesia:match_object(rabbit_route,
+ Route, read)]
+ end).
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS.
+
+map(VHostPath, F) ->
+ %% TODO: there is scope for optimisation here, e.g. using a
+ %% cursor, parallelising the function invocation
+ lists:map(F, list(VHostPath)).
+
+infos(Items, B) -> [{Item, i(Item, B)} || Item <- Items].
+
+i(source_name, #binding{source = SrcName}) -> SrcName#resource.name;
+i(source_kind, #binding{source = SrcName}) -> SrcName#resource.kind;
+i(vhost, #binding{source = SrcName}) -> SrcName#resource.virtual_host;
+i(destination_name, #binding{destination = DstName}) -> DstName#resource.name;
+i(destination_kind, #binding{destination = DstName}) -> DstName#resource.kind;
+i(routing_key, #binding{key = RoutingKey}) -> RoutingKey;
+i(arguments, #binding{args = Arguments}) -> Arguments;
+i(Item, _) -> throw({bad_argument, Item}).
+
+-spec info(rabbit_types:binding()) -> rabbit_types:infos().
+
+info(B = #binding{}) -> infos(?INFO_KEYS, B).
+
+-spec info(rabbit_types:binding(), rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+
+info(B = #binding{}, Items) -> infos(Items, B).
+
+-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+
+info_all(VHostPath) -> map(VHostPath, fun (B) -> info(B) end).
+
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys()) ->
+ [rabbit_types:infos()].
+
+info_all(VHostPath, Items) -> map(VHostPath, fun (B) -> info(B, Items) end).
+
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys(),
+ reference(), pid()) -> 'ok'.
+
+info_all(VHostPath, Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(B) -> info(B, Items) end, list(VHostPath)).
+
+-spec has_for_source(rabbit_types:binding_source()) -> boolean().
+
+has_for_source(SrcName) ->
+ Match = #route{binding = #binding{source = SrcName, _ = '_'}},
+ %% we need to check for semi-durable routes (which subsumes
+ %% durable routes) here too in case a bunch of routes to durable
+ %% queues have been removed temporarily as a result of a node
+ %% failure
+ contains(rabbit_route, Match) orelse
+ contains(rabbit_semi_durable_route, Match).
+
+-spec remove_for_source(rabbit_types:binding_source()) -> bindings().
+
+remove_for_source(SrcName) ->
+ lock_resource(SrcName),
+ Match = #route{binding = #binding{source = SrcName, _ = '_'}},
+ remove_routes(
+ lists:usort(
+ mnesia:dirty_match_object(rabbit_route, Match) ++
+ mnesia:dirty_match_object(rabbit_semi_durable_route, Match))).
+
+-spec remove_for_destination
+ (rabbit_types:binding_destination(), boolean()) -> deletions().
+
+remove_for_destination(DstName, OnlyDurable) ->
+ remove_for_destination(DstName, OnlyDurable, fun remove_routes/1).
+
+-spec remove_transient_for_destination
+ (rabbit_types:binding_destination()) -> deletions().
+
+remove_transient_for_destination(DstName) ->
+ remove_for_destination(DstName, false, fun remove_transient_routes/1).
+
+%%----------------------------------------------------------------------------
+
+durable(#exchange{durable = D}) -> D;
+durable(Q) when ?is_amqqueue(Q) ->
+ amqqueue:is_durable(Q).
+
+binding_action(Binding = #binding{source = SrcName,
+ destination = DstName,
+ args = Arguments}, Fun, ErrFun) ->
+ call_with_source_and_destination(
+ SrcName, DstName,
+ fun (Src, Dst) ->
+ SortedArgs = rabbit_misc:sort_field_table(Arguments),
+ Fun(Src, Dst, Binding#binding{args = SortedArgs})
+ end, ErrFun).
+
+sync_route(Route, true, true, Fun) ->
+ ok = Fun(rabbit_durable_route, Route, write),
+ sync_route(Route, false, true, Fun);
+
+sync_route(Route, false, true, Fun) ->
+ ok = Fun(rabbit_semi_durable_route, Route, write),
+ sync_route(Route, false, false, Fun);
+
+sync_route(Route, _SrcDurable, false, Fun) ->
+ sync_transient_route(Route, Fun).
+
+sync_transient_route(Route, Fun) ->
+ ok = Fun(rabbit_route, Route, write),
+ ok = Fun(rabbit_reverse_route, reverse_route(Route), write).
+
+call_with_source_and_destination(SrcName, DstName, Fun, ErrFun) ->
+ SrcTable = table_for_resource(SrcName),
+ DstTable = table_for_resource(DstName),
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () ->
+ case {mnesia:read({SrcTable, SrcName}),
+ mnesia:read({DstTable, DstName})} of
+ {[Src], [Dst]} -> Fun(Src, Dst);
+ {[], [_] } -> ErrFun([SrcName]);
+ {[_], [] } -> ErrFun([DstName]);
+ {[], [] } -> ErrFun([SrcName, DstName])
+ end
+ end).
+
+not_found_or_absent_errs(Names) ->
+ Errs = [not_found_or_absent(Name) || Name <- Names],
+ rabbit_misc:const({error, {resources_missing, Errs}}).
+
+absent_errs_only(Names) ->
+ Errs = [E || Name <- Names,
+ {absent, _Q, _Reason} = E <- [not_found_or_absent(Name)]],
+ rabbit_misc:const(case Errs of
+ [] -> ok;
+ _ -> {error, {resources_missing, Errs}}
+ end).
+
+table_for_resource(#resource{kind = exchange}) -> rabbit_exchange;
+table_for_resource(#resource{kind = queue}) -> rabbit_queue.
+
+not_found_or_absent(#resource{kind = exchange} = Name) ->
+ {not_found, Name};
+not_found_or_absent(#resource{kind = queue} = Name) ->
+ case rabbit_amqqueue:not_found_or_absent(Name) of
+ not_found -> {not_found, Name};
+ {absent, _Q, _Reason} = R -> R
+ end.
+
+contains(Table, MatchHead) ->
+ continue(mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read)).
+
+continue('$end_of_table') -> false;
+continue({[_|_], _}) -> true;
+continue({[], Continuation}) -> continue(mnesia:select(Continuation)).
+
+remove_routes(Routes) ->
+ %% This partitioning allows us to suppress unnecessary delete
+ %% operations on disk tables, which require an fsync.
+ {RamRoutes, DiskRoutes} =
+ lists:partition(fun (R) -> mnesia:read(
+ rabbit_durable_route, R#route.binding, read) == [] end,
+ Routes),
+ {RamOnlyRoutes, SemiDurableRoutes} =
+ lists:partition(fun (R) -> mnesia:read(
+ rabbit_semi_durable_route, R#route.binding, read) == [] end,
+ RamRoutes),
+ %% Of course the destination might not really be durable but it's
+ %% just as easy to try to delete it from the semi-durable table
+ %% as to check first
+ [ok = sync_route(R, true, true, fun delete/3) ||
+ R <- DiskRoutes],
+ [ok = sync_route(R, false, true, fun delete/3) ||
+ R <- SemiDurableRoutes],
+ [ok = sync_route(R, false, false, fun delete/3) ||
+ R <- RamOnlyRoutes],
+ [R#route.binding || R <- Routes].
+
+
+delete(Tab, #route{binding = B}, LockKind) ->
+ mnesia:delete(Tab, B, LockKind);
+delete(Tab, #reverse_route{reverse_binding = B}, LockKind) ->
+ mnesia:delete(Tab, B, LockKind).
+
+remove_transient_routes(Routes) ->
+ [begin
+ ok = sync_transient_route(R, fun delete/3),
+ R#route.binding
+ end || R <- Routes].
+
+remove_for_destination(DstName, OnlyDurable, Fun) ->
+ lock_resource(DstName),
+ MatchFwd = #route{binding = #binding{destination = DstName, _ = '_'}},
+ MatchRev = reverse_route(MatchFwd),
+ Routes = case OnlyDurable of
+ false ->
+ [reverse_route(R) ||
+ R <- mnesia:dirty_match_object(
+ rabbit_reverse_route, MatchRev)];
+ true -> lists:usort(
+ mnesia:dirty_match_object(
+ rabbit_durable_route, MatchFwd) ++
+ mnesia:dirty_match_object(
+ rabbit_semi_durable_route, MatchFwd))
+ end,
+ Bindings = Fun(Routes),
+ group_bindings_fold(fun maybe_auto_delete/4, new_deletions(),
+ lists:keysort(#binding.source, Bindings), OnlyDurable).
+
+%% Instead of locking the entire table on remove operations we can lock
+%% only the affected resource.
+lock_resource(Name) -> lock_resource(Name, write).
+
+lock_resource(Name, LockKind) ->
+ mnesia:lock({global, Name, mnesia:table_info(rabbit_route, where_to_write)},
+ LockKind).
+
+%% Requires that its input binding list is sorted in exchange-name
+%% order, so that the grouping of bindings by source (for passing to
+%% the supplied fold function, e.g. maybe_auto_delete/4) works properly.
+group_bindings_fold(_Fun, Acc, [], _OnlyDurable) ->
+ Acc;
+group_bindings_fold(Fun, Acc, [B = #binding{source = SrcName} | Bs],
+ OnlyDurable) ->
+ group_bindings_fold(Fun, SrcName, Acc, Bs, [B], OnlyDurable).
+
+group_bindings_fold(
+ Fun, SrcName, Acc, [B = #binding{source = SrcName} | Bs], Bindings,
+ OnlyDurable) ->
+ group_bindings_fold(Fun, SrcName, Acc, Bs, [B | Bindings], OnlyDurable);
+group_bindings_fold(Fun, SrcName, Acc, Removed, Bindings, OnlyDurable) ->
+ %% Either Removed is [], or its head has a non-matching SrcName.
+ group_bindings_fold(Fun, Fun(SrcName, Bindings, Acc, OnlyDurable), Removed,
+ OnlyDurable).
+
+maybe_auto_delete(XName, Bindings, Deletions, OnlyDurable) ->
+ {Entry, Deletions1} =
+ case mnesia:read({case OnlyDurable of
+ true -> rabbit_durable_exchange;
+ false -> rabbit_exchange
+ end, XName}) of
+ [] -> {{undefined, not_deleted, Bindings}, Deletions};
+ [X] -> case rabbit_exchange:maybe_auto_delete(X, OnlyDurable) of
+ not_deleted ->
+ {{X, not_deleted, Bindings}, Deletions};
+ {deleted, Deletions2} ->
+ {{X, deleted, Bindings},
+ combine_deletions(Deletions, Deletions2)}
+ end
+ end,
+ add_deletion(XName, Entry, Deletions1).
+
+reverse_route(#route{binding = Binding}) ->
+ #reverse_route{reverse_binding = reverse_binding(Binding)};
+
+reverse_route(#reverse_route{reverse_binding = Binding}) ->
+ #route{binding = reverse_binding(Binding)}.
+
+reverse_binding(#reverse_binding{source = SrcName,
+ destination = DstName,
+ key = Key,
+ args = Args}) ->
+ #binding{source = SrcName,
+ destination = DstName,
+ key = Key,
+ args = Args};
+
+reverse_binding(#binding{source = SrcName,
+ destination = DstName,
+ key = Key,
+ args = Args}) ->
+ #reverse_binding{source = SrcName,
+ destination = DstName,
+ key = Key,
+ args = Args}.
+
+%% ----------------------------------------------------------------------------
+%% Binding / exchange deletion abstraction API
+%% ----------------------------------------------------------------------------
+
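+%% anything_but/3 returns whichever of its second and third arguments differs
+%% from the first ("NotThis") value, falling back to NotThis when both equal
+%% it; merge_entry/2 below uses it to keep the more informative value (a known
+%% exchange over 'undefined', 'deleted' over 'not_deleted') when merging.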
+anything_but( NotThis, NotThis, NotThis) -> NotThis;
+anything_but( NotThis, NotThis, This) -> This;
+anything_but( NotThis, This, NotThis) -> This;
+anything_but(_NotThis, This, This) -> This.
+
+-spec new_deletions() -> deletions().
+
+new_deletions() -> dict:new().
+
+-spec add_deletion
+ (rabbit_exchange:name(),
+ {'undefined' | rabbit_types:exchange(),
+ 'deleted' | 'not_deleted',
+ bindings()},
+ deletions()) ->
+ deletions().
+
+add_deletion(XName, Entry, Deletions) ->
+ dict:update(XName, fun (Entry1) -> merge_entry(Entry1, Entry) end,
+ Entry, Deletions).
+
+-spec combine_deletions(deletions(), deletions()) -> deletions().
+
+combine_deletions(Deletions1, Deletions2) ->
+ dict:merge(fun (_XName, Entry1, Entry2) -> merge_entry(Entry1, Entry2) end,
+ Deletions1, Deletions2).
+
+merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) ->
+ {anything_but(undefined, X1, X2),
+ anything_but(not_deleted, Deleted1, Deleted2),
+ [Bindings1 | Bindings2]}.
+
+-spec process_deletions(deletions(), rabbit_types:username()) -> rabbit_misc:thunk('ok').
+
+process_deletions(Deletions, ActingUser) ->
+ AugmentedDeletions =
+ dict:map(fun (_XName, {X, deleted, Bindings}) ->
+ Bs = lists:flatten(Bindings),
+ x_callback(transaction, X, delete, Bs),
+ {X, deleted, Bs, none};
+ (_XName, {X, not_deleted, Bindings}) ->
+ Bs = lists:flatten(Bindings),
+ x_callback(transaction, X, remove_bindings, Bs),
+ {X, not_deleted, Bs, rabbit_exchange:serial(X)}
+ end, Deletions),
+ fun() ->
+ dict:fold(fun (XName, {X, deleted, Bs, Serial}, ok) ->
+ ok = rabbit_event:notify(
+ exchange_deleted,
+ [{name, XName},
+ {user_who_performed_action, ActingUser}]),
+ del_notify(Bs, ActingUser),
+ x_callback(Serial, X, delete, Bs);
+ (_XName, {X, not_deleted, Bs, Serial}, ok) ->
+ del_notify(Bs, ActingUser),
+ x_callback(Serial, X, remove_bindings, Bs)
+ end, ok, AugmentedDeletions)
+ end.
+
+del_notify(Bs, ActingUser) -> [rabbit_event:notify(
+ binding_deleted,
+ info(B) ++ [{user_who_performed_action, ActingUser}])
+ || B <- Bs].
+
+x_callback(Serial, X, F, Bs) ->
+ ok = rabbit_exchange:callback(X, F, Serial, [X, Bs]).
diff --git a/deps/rabbit/src/rabbit_boot_steps.erl b/deps/rabbit/src/rabbit_boot_steps.erl
new file mode 100644
index 0000000000..f87448edb7
--- /dev/null
+++ b/deps/rabbit/src/rabbit_boot_steps.erl
@@ -0,0 +1,91 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_boot_steps).
+
+-export([run_boot_steps/0, run_boot_steps/1, run_cleanup_steps/1]).
+-export([find_steps/0, find_steps/1]).
+
+run_boot_steps() ->
+ run_boot_steps(loaded_applications()).
+
+run_boot_steps(Apps) ->
+ [begin
+ rabbit_log:info("Running boot step ~s defined by app ~s", [Step, App]),
+ ok = run_step(Attrs, mfa)
+ end || {App, Step, Attrs} <- find_steps(Apps)],
+ ok.
+
+run_cleanup_steps(Apps) ->
+ [run_step(Attrs, cleanup) || {_, _, Attrs} <- find_steps(Apps)],
+ ok.
+
+loaded_applications() ->
+ [App || {App, _, _} <- application:loaded_applications()].
+
+find_steps() ->
+ find_steps(loaded_applications()).
+
+find_steps(Apps) ->
+ All = sort_boot_steps(rabbit_misc:all_module_attributes(rabbit_boot_step)),
+ [Step || {App, _, _} = Step <- All, lists:member(App, Apps)].
+
+run_step(Attributes, AttributeName) ->
+ [begin
+ rabbit_log:debug("Applying MFA: M = ~s, F = ~s, A = ~p",
+ [M, F, A]),
+ case apply(M,F,A) of
+ ok -> ok;
+ {error, Reason} -> exit({error, Reason})
+ end
+ end
+ || {Key, {M,F,A}} <- Attributes,
+ Key =:= AttributeName],
+ ok.
+
+vertices({AppName, _Module, Steps}) ->
+ [{StepName, {AppName, StepName, Atts}} || {StepName, Atts} <- Steps].
+
+edges({_AppName, _Module, Steps}) ->
+ EnsureList = fun (L) when is_list(L) -> L;
+ (T) -> [T]
+ end,
+ [case Key of
+ requires -> {StepName, OtherStep};
+ enables -> {OtherStep, StepName}
+ end || {StepName, Atts} <- Steps,
+ {Key, OtherStepOrSteps} <- Atts,
+ OtherStep <- EnsureList(OtherStepOrSteps),
+ Key =:= requires orelse Key =:= enables].
+
+sort_boot_steps(UnsortedSteps) ->
+ case rabbit_misc:build_acyclic_graph(fun vertices/1, fun edges/1,
+ UnsortedSteps) of
+ {ok, G} ->
+ %% Use topological sort to find a consistent ordering (if
+ %% there is one, otherwise fail).
+ SortedSteps = lists:reverse(
+ [begin
+ {StepName, Step} = digraph:vertex(G,
+ StepName),
+ Step
+ end || StepName <- digraph_utils:topsort(G)]),
+ digraph:delete(G),
+ %% Check that all mentioned {M,F,A} triples are exported.
+ case [{StepName, {M,F,A}} ||
+ {_App, StepName, Attributes} <- SortedSteps,
+ {mfa, {M,F,A}} <- Attributes,
+ code:ensure_loaded(M) =/= {module, M} orelse
+ not erlang:function_exported(M, F, length(A))] of
+ [] -> SortedSteps;
+ MissingFns -> exit({boot_functions_not_exported, MissingFns})
+ end;
+ {error, {vertex, duplicate, StepName}} ->
+ exit({duplicate_boot_step, StepName});
+ {error, {edge, Reason, From, To}} ->
+ exit({invalid_boot_step_dependency, From, To, Reason})
+ end.
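+
+%% For reference, boot steps are declared as module attributes elsewhere in
+%% the code base and discovered by find_steps/1 above. An illustrative
+%% (made-up) declaration looks like this:
+%%
+%%   -rabbit_boot_step({my_example_step,
+%%                      [{description, "example boot step"},
+%%                       {mfa,         {my_app_module, start_thing, []}},
+%%                       {requires,    database},
+%%                       {enables,     external_infrastructure}]}).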
diff --git a/deps/rabbit/src/rabbit_channel.erl b/deps/rabbit/src/rabbit_channel.erl
new file mode 100644
index 0000000000..8e7828a7c0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel.erl
@@ -0,0 +1,2797 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+%% rabbit_channel processes represent AMQP 0-9-1 channels.
+%%
+%% Connections parse protocol frames coming from clients and
+%% dispatch them to channel processes.
+%% Channels are responsible for implementing the logic behind
+%% the various protocol methods, involving other processes as
+%% needed:
+%%
+%% * Routing messages (using functions in various exchange type
+%% modules) to queue processes.
+%% * Managing queues, exchanges, and bindings.
+%% * Keeping track of consumers
+%% * Keeping track of unacknowledged deliveries to consumers
+%% * Keeping track of publisher confirms
+%% * Transaction management
+%% * Authorisation (enforcing permissions)
+%% * Publishing trace events if tracing is enabled
+%%
+%% Every channel has a number of dependent processes:
+%%
+%% * A writer which is responsible for sending frames to clients.
+%% * A limiter which controls how many messages can be delivered
+%% to consumers according to active QoS prefetch and internal
+%% flow control logic.
+%%
+%% Channels are also aware of their connection's queue collector.
+%% When a queue is declared as exclusive on a channel, the channel
+%% will notify the queue collector of that queue.
+
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_misc.hrl").
+
+-include("amqqueue.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/11, start_link/12, do/2, do/3, do_flow/3, flush/1, shutdown/1]).
+-export([send_command/2, deliver/4, deliver_reply/2,
+ send_credit_reply/2, send_drained/2]).
+-export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1,
+ emit_info_all/4, info_local/1]).
+-export([refresh_config_local/0, ready_for_close/1]).
+-export([refresh_interceptors/0]).
+-export([force_event_refresh/1]).
+-export([update_user_state/2]).
+
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, handle_pre_hibernate/1, handle_post_hibernate/1,
+ prioritise_call/4, prioritise_cast/3, prioritise_info/3,
+ format_message_queue/2]).
+
+%% Internal
+-export([list_local/0, emit_info_local/3, deliver_reply_local/3]).
+-export([get_vhost/1, get_user/1]).
+%% For testing
+-export([build_topic_variable_map/3]).
+-export([list_queue_states/1, get_max_message_size/0]).
+
+%% Mgmt HTTP API refactor
+-export([handle_method/6]).
+
+-record(conf, {
+ %% starting | running | flow | closing
+ state,
+ %% same as reader's protocol. Used when instantiating
+ %% (protocol) exceptions.
+ protocol,
+ %% channel number
+ channel,
+ %% reader process
+ reader_pid,
+ %% writer process
+ writer_pid,
+ %%
+ conn_pid,
+ %% same as reader's name, see #v1.name
+ %% in rabbit_reader
+ conn_name,
+ %% channel's originating source e.g. rabbit_reader | rabbit_direct | undefined
+ %% or any other channel creating/spawning entity
+ source,
+ %% same as #v1.user in the reader, used in
+ %% authorisation checks
+ user,
+ %% the virtual host of the connection, same as in the reader
+ virtual_host,
+ %% when queue.bind's queue field is empty,
+ %% this name will be used instead
+ most_recently_declared_queue,
+ %% when a queue is declared as exclusive, queue
+ %% collector must be notified.
+ %% see rabbit_queue_collector for more info.
+ queue_collector_pid,
+
+ %% same as capabilities in the reader
+ capabilities,
+ %% tracing exchange resource if tracing is enabled,
+ %% 'none' otherwise
+ trace_state,
+ consumer_prefetch,
+ %% Message content size limit
+ max_message_size,
+ consumer_timeout,
+ authz_context,
+ %% defines how often GC will be executed
+ writer_gc_threshold
+ }).
+
+-record(pending_ack, {delivery_tag,
+ tag,
+ delivered_at,
+ queue, %% queue name
+ msg_id}).
+
+-record(ch, {cfg :: #conf{},
+ %% limiter state, see rabbit_limiter
+ limiter,
+ %% none | {Msgs, Acks} | committing | failed |
+ tx,
+ %% (consumer) delivery tag sequence
+ next_tag,
+ %% messages pending consumer acknowledgement
+ unacked_message_q,
+ %% queue processes are monitored to update
+ %% queue names
+ queue_monitors,
+ %% a map of consumer tags to
+ %% consumer details: #amqqueue record, acknowledgement mode,
+ %% consumer exclusivity, etc
+ consumer_mapping,
+ %% a map of queue names to consumer tag lists
+ queue_consumers,
+ %% timer used to emit statistics
+ stats_timer,
+ %% are publisher confirms enabled for this channel?
+ confirm_enabled,
+ %% publisher confirm delivery tag sequence
+ publish_seqno,
+ %% an unconfirmed_messages data structure used to track unconfirmed
+ %% (to publishers) messages
+ unconfirmed,
+ %% a list of tags for published messages that were
+ %% delivered but are yet to be confirmed to the client
+ confirmed,
+ %% a list of tags for published messages that were
+ %% rejected but are yet to be sent to the client
+ rejected,
+ %% used by "one shot RPC" (amq.rabbitmq.reply-to)
+ reply_consumer,
+ %% flow | noflow, see rabbitmq-server#114
+ delivery_flow,
+ interceptor_state,
+ queue_states,
+ tick_timer
+ }).
+
+-define(QUEUE, lqueue).
+
+-define(MAX_PERMISSION_CACHE_SIZE, 12).
+
+-define(REFRESH_TIMEOUT, 15000).
+
+-define(STATISTICS_KEYS,
+ [reductions,
+ pid,
+ transactional,
+ confirm,
+ consumer_count,
+ messages_unacknowledged,
+ messages_unconfirmed,
+ messages_uncommitted,
+ acks_uncommitted,
+ pending_raft_commands,
+ prefetch_count,
+ global_prefetch_count,
+ state,
+ garbage_collection]).
+
+
+-define(CREATION_EVENT_KEYS,
+ [pid,
+ name,
+ connection,
+ number,
+ user,
+ vhost,
+ user_who_performed_action]).
+
+-define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
+
+-define(INCR_STATS(Type, Key, Inc, Measure, State),
+ case rabbit_event:stats_level(State, #ch.stats_timer) of
+ fine ->
+ rabbit_core_metrics:channel_stats(Type, Measure, {self(), Key}, Inc),
+ %% Keys in the process dictionary are used to clean up the core metrics
+ put({Type, Key}, none);
+ _ ->
+ ok
+ end).
+
+-define(INCR_STATS(Type, Key, Inc, Measure),
+ begin
+ rabbit_core_metrics:channel_stats(Type, Measure, {self(), Key}, Inc),
+ %% Keys in the process dictionary are used to clean up the core metrics
+ put({Type, Key}, none)
+ end).
+
+%%----------------------------------------------------------------------------
+
+-export_type([channel_number/0]).
+
+-type channel_number() :: non_neg_integer().
+
+-export_type([channel/0]).
+
+-type channel() :: #ch{}.
+
+%%----------------------------------------------------------------------------
+
+-spec start_link
+ (channel_number(), pid(), pid(), pid(), string(), rabbit_types:protocol(),
+ rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
+ pid(), pid()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User,
+ VHost, Capabilities, CollectorPid, Limiter) ->
+ start_link(Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User,
+ VHost, Capabilities, CollectorPid, Limiter, undefined).
+
+-spec start_link
+ (channel_number(), pid(), pid(), pid(), string(), rabbit_types:protocol(),
+ rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
+ pid(), pid(), any()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User,
+ VHost, Capabilities, CollectorPid, Limiter, AmqpParams) ->
+ gen_server2:start_link(
+ ?MODULE, [Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol,
+ User, VHost, Capabilities, CollectorPid, Limiter, AmqpParams], []).
+
+-spec do(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+
+do(Pid, Method) ->
+ rabbit_channel_common:do(Pid, Method).
+
+-spec do
+ (pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:maybe(rabbit_types:content())) ->
+ 'ok'.
+
+do(Pid, Method, Content) ->
+ rabbit_channel_common:do(Pid, Method, Content).
+
+-spec do_flow
+ (pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:maybe(rabbit_types:content())) ->
+ 'ok'.
+
+do_flow(Pid, Method, Content) ->
+ rabbit_channel_common:do_flow(Pid, Method, Content).
+
+-spec flush(pid()) -> 'ok'.
+
+flush(Pid) ->
+ gen_server2:call(Pid, flush, infinity).
+
+-spec shutdown(pid()) -> 'ok'.
+
+shutdown(Pid) ->
+ gen_server2:cast(Pid, terminate).
+
+-spec send_command(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+
+send_command(Pid, Msg) ->
+ gen_server2:cast(Pid, {command, Msg}).
+
+-spec deliver
+ (pid(), rabbit_types:ctag(), boolean(), rabbit_amqqueue:qmsg()) -> 'ok'.
+
+deliver(Pid, ConsumerTag, AckRequired, Msg) ->
+ gen_server2:cast(Pid, {deliver, ConsumerTag, AckRequired, Msg}).
+
+-spec deliver_reply(binary(), rabbit_types:delivery()) -> 'ok'.
+
+deliver_reply(<<"amq.rabbitmq.reply-to.", Rest/binary>>, Delivery) ->
+ case decode_fast_reply_to(Rest) of
+ {ok, Pid, Key} ->
+ delegate:invoke_no_result(
+ Pid, {?MODULE, deliver_reply_local, [Key, Delivery]});
+ error ->
+ ok
+ end.
+
+%% We want to ensure people can't use this mechanism to send a message
+%% to an arbitrary process and kill it!
+
+-spec deliver_reply_local(pid(), binary(), rabbit_types:delivery()) -> 'ok'.
+
+deliver_reply_local(Pid, Key, Delivery) ->
+ case pg_local:in_group(rabbit_channels, Pid) of
+ true -> gen_server2:cast(Pid, {deliver_reply, Key, Delivery});
+ false -> ok
+ end.
+
+declare_fast_reply_to(<<"amq.rabbitmq.reply-to">>) ->
+ exists;
+declare_fast_reply_to(<<"amq.rabbitmq.reply-to.", Rest/binary>>) ->
+ case decode_fast_reply_to(Rest) of
+ {ok, Pid, Key} ->
+ Msg = {declare_fast_reply_to, Key},
+ rabbit_misc:with_exit_handler(
+ rabbit_misc:const(not_found),
+ fun() -> gen_server2:call(Pid, Msg, infinity) end);
+ error ->
+ not_found
+ end;
+declare_fast_reply_to(_) ->
+ not_found.
+
+decode_fast_reply_to(Rest) ->
+ case string:tokens(binary_to_list(Rest), ".") of
+ [PidEnc, Key] -> Pid = binary_to_term(base64:decode(PidEnc)),
+ {ok, Pid, Key};
+ _ -> error
+ end.
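+%% A fast reply-to queue name has the shape
+%%   amq.rabbitmq.reply-to.<base64(term_to_binary(ChannelPid))>.<base64 key>
+%% (the suffix is generated in the 'basic.consume' clause for
+%% "amq.rabbitmq.reply-to" below). Since base64 output never contains a
+%% '.', splitting the suffix on '.' unambiguously recovers the encoded
+%% channel pid and the key.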
+
+-spec send_credit_reply(pid(), non_neg_integer()) -> 'ok'.
+
+send_credit_reply(Pid, Len) ->
+ gen_server2:cast(Pid, {send_credit_reply, Len}).
+
+-spec send_drained(pid(), [{rabbit_types:ctag(), non_neg_integer()}]) -> 'ok'.
+
+send_drained(Pid, CTagCredit) ->
+ gen_server2:cast(Pid, {send_drained, CTagCredit}).
+
+-spec list() -> [pid()].
+
+list() ->
+ Nodes = rabbit_nodes:all_running(),
+ rabbit_misc:append_rpc_all_nodes(Nodes, rabbit_channel, list_local, [], ?RPC_TIMEOUT).
+
+-spec list_local() -> [pid()].
+
+list_local() ->
+ pg_local:get_members(rabbit_channels).
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS.
+
+-spec info(pid()) -> rabbit_types:infos().
+
+info(Pid) ->
+ {Timeout, Deadline} = get_operation_timeout_and_deadline(),
+ try
+ case gen_server2:call(Pid, {info, Deadline}, Timeout) of
+ {ok, Res} -> Res;
+ {error, Error} -> throw(Error)
+ end
+ catch
+ exit:{timeout, _} ->
+ rabbit_log:error("Timed out getting channel ~p info", [Pid]),
+ throw(timeout)
+ end.
+
+-spec info(pid(), rabbit_types:info_keys()) -> rabbit_types:infos().
+
+info(Pid, Items) ->
+ {Timeout, Deadline} = get_operation_timeout_and_deadline(),
+ try
+ case gen_server2:call(Pid, {{info, Items}, Deadline}, Timeout) of
+ {ok, Res} -> Res;
+ {error, Error} -> throw(Error)
+ end
+ catch
+ exit:{timeout, _} ->
+ rabbit_log:error("Timed out getting channel ~p info", [Pid]),
+ throw(timeout)
+ end.
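+%% Example: info(ChPid, [messages_unacknowledged, prefetch_count])
+%% returns a proplist restricted to the requested keys, while info/1
+%% uses the full ?INFO_KEYS set. Both calls are bounded by the
+%% operation timeout and throw 'timeout' if the channel does not reply
+%% in time.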
+
+-spec info_all() -> [rabbit_types:infos()].
+
+info_all() ->
+ rabbit_misc:filter_exit_map(fun (C) -> info(C) end, list()).
+
+-spec info_all(rabbit_types:info_keys()) -> [rabbit_types:infos()].
+
+info_all(Items) ->
+ rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list()).
+
+info_local(Items) ->
+ rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list_local()).
+
+emit_info_all(Nodes, Items, Ref, AggregatorPid) ->
+ Pids = [ spawn_link(Node, rabbit_channel, emit_info_local, [Items, Ref, AggregatorPid]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids).
+
+emit_info_local(Items, Ref, AggregatorPid) ->
+ emit_info(list_local(), Items, Ref, AggregatorPid).
+
+emit_info(PidList, InfoItems, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(C) -> info(C, InfoItems) end, PidList).
+
+-spec refresh_config_local() -> 'ok'.
+
+refresh_config_local() ->
+ rabbit_misc:upmap(
+ fun (C) ->
+ try
+ gen_server2:call(C, refresh_config, infinity)
+ catch _:Reason ->
+ rabbit_log:error("Failed to refresh channel config "
+ "for channel ~p. Reason ~p",
+ [C, Reason])
+ end
+ end,
+ list_local()),
+ ok.
+
+refresh_interceptors() ->
+ rabbit_misc:upmap(
+ fun (C) ->
+ try
+ gen_server2:call(C, refresh_interceptors, ?REFRESH_TIMEOUT)
+ catch _:Reason ->
+ rabbit_log:error("Failed to refresh channel interceptors "
+ "for channel ~p. Reason ~p",
+ [C, Reason])
+ end
+ end,
+ list_local()),
+ ok.
+
+-spec ready_for_close(pid()) -> 'ok'.
+
+ready_for_close(Pid) ->
+ rabbit_channel_common:ready_for_close(Pid).
+
+-spec force_event_refresh(reference()) -> 'ok'.
+
+% Note: https://www.pivotaltracker.com/story/show/166962656
+% This event is necessary for the stats timer to be initialized with
+% the correct values once the management agent has started
+force_event_refresh(Ref) ->
+ [gen_server2:cast(C, {force_event_refresh, Ref}) || C <- list()],
+ ok.
+
+list_queue_states(Pid) ->
+ gen_server2:call(Pid, list_queue_states).
+
+-spec update_user_state(pid(), rabbit_types:auth_user()) -> 'ok' | {error, channel_terminated}.
+
+update_user_state(Pid, UserState) when is_pid(Pid) ->
+ case erlang:is_process_alive(Pid) of
+ true -> Pid ! {update_user_state, UserState},
+ ok;
+ false -> {error, channel_terminated}
+ end.
+
+%%---------------------------------------------------------------------------
+
+init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost,
+ Capabilities, CollectorPid, LimiterPid, AmqpParams]) ->
+ process_flag(trap_exit, true),
+ ?LG_PROCESS_TYPE(channel),
+ ?store_proc_name({ConnName, Channel}),
+ ok = pg_local:join(rabbit_channels, self()),
+ Flow = case rabbit_misc:get_env(rabbit, mirroring_flow_control, true) of
+ true -> flow;
+ false -> noflow
+ end,
+ {ok, {Global, Prefetch}} = application:get_env(rabbit, default_consumer_prefetch),
+ Limiter0 = rabbit_limiter:new(LimiterPid),
+ Limiter = case {Global, Prefetch} of
+ {true, 0} ->
+ rabbit_limiter:unlimit_prefetch(Limiter0);
+ {true, _} ->
+ rabbit_limiter:limit_prefetch(Limiter0, Prefetch, 0);
+ _ ->
+ Limiter0
+ end,
+ %% Process dictionary is used here because permission cache already uses it. MK.
+ put(permission_cache_can_expire, rabbit_access_control:permission_cache_can_expire(User)),
+ MaxMessageSize = get_max_message_size(),
+ ConsumerTimeout = get_consumer_timeout(),
+ OptionalVariables = extract_variable_map_from_amqp_params(AmqpParams),
+ {ok, GCThreshold} = application:get_env(rabbit, writer_gc_threshold),
+ State = #ch{cfg = #conf{state = starting,
+ protocol = Protocol,
+ channel = Channel,
+ reader_pid = ReaderPid,
+ writer_pid = WriterPid,
+ conn_pid = ConnPid,
+ conn_name = ConnName,
+ user = User,
+ virtual_host = VHost,
+ most_recently_declared_queue = <<>>,
+ queue_collector_pid = CollectorPid,
+ capabilities = Capabilities,
+ trace_state = rabbit_trace:init(VHost),
+ consumer_prefetch = Prefetch,
+ max_message_size = MaxMessageSize,
+ consumer_timeout = ConsumerTimeout,
+ authz_context = OptionalVariables,
+ writer_gc_threshold = GCThreshold
+ },
+ limiter = Limiter,
+ tx = none,
+ next_tag = 1,
+ unacked_message_q = ?QUEUE:new(),
+ queue_monitors = pmon:new(),
+ consumer_mapping = #{},
+ queue_consumers = #{},
+ confirm_enabled = false,
+ publish_seqno = 1,
+ unconfirmed = rabbit_confirms:init(),
+ rejected = [],
+ confirmed = [],
+ reply_consumer = none,
+ delivery_flow = Flow,
+ interceptor_state = undefined,
+ queue_states = rabbit_queue_type:init()
+ },
+ State1 = State#ch{
+ interceptor_state = rabbit_channel_interceptor:init(State)},
+ State2 = rabbit_event:init_stats_timer(State1, #ch.stats_timer),
+ Infos = infos(?CREATION_EVENT_KEYS, State2),
+ rabbit_core_metrics:channel_created(self(), Infos),
+ rabbit_event:notify(channel_created, Infos),
+ rabbit_event:if_enabled(State2, #ch.stats_timer,
+ fun() -> emit_stats(State2) end),
+ put_operation_timeout(),
+ State3 = init_tick_timer(State2),
+ {ok, State3, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+prioritise_call(Msg, _From, _Len, _State) ->
+ case Msg of
+ info -> 9;
+ {info, _Items} -> 9;
+ _ -> 0
+ end.
+
+prioritise_cast(Msg, _Len, _State) ->
+ case Msg of
+ {confirm, _MsgSeqNos, _QPid} -> 5;
+ {reject_publish, _MsgSeqNos, _QPid} -> 5;
+ {queue_event, _, {confirm, _MsgSeqNos, _QPid}} -> 5;
+ {queue_event, _, {reject_publish, _MsgSeqNos, _QPid}} -> 5;
+ _ -> 0
+ end.
+
+prioritise_info(Msg, _Len, _State) ->
+ case Msg of
+ emit_stats -> 7;
+ _ -> 0
+ end.
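+%% The prioritise_* callbacks are gen_server2 hooks: messages for which
+%% a higher priority is returned are dequeued first. Info requests (9)
+%% and stats emission (7) therefore jump ahead of publish traffic, and
+%% confirm/reject notifications from queues (5) are handled before
+%% ordinary casts (0).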
+
+handle_call(flush, _From, State) ->
+ reply(ok, State);
+
+handle_call({info, Deadline}, _From, State) ->
+ try
+ reply({ok, infos(?INFO_KEYS, Deadline, State)}, State)
+ catch
+ Error ->
+ reply({error, Error}, State)
+ end;
+
+handle_call({{info, Items}, Deadline}, _From, State) ->
+ try
+ reply({ok, infos(Items, Deadline, State)}, State)
+ catch
+ Error ->
+ reply({error, Error}, State)
+ end;
+
+handle_call(refresh_config, _From,
+ State = #ch{cfg = #conf{virtual_host = VHost} = Cfg}) ->
+ reply(ok, State#ch{cfg = Cfg#conf{trace_state = rabbit_trace:init(VHost)}});
+
+handle_call(refresh_interceptors, _From, State) ->
+ IState = rabbit_channel_interceptor:init(State),
+ reply(ok, State#ch{interceptor_state = IState});
+
+handle_call({declare_fast_reply_to, Key}, _From,
+ State = #ch{reply_consumer = Consumer}) ->
+ reply(case Consumer of
+ {_, _, Key} -> exists;
+ _ -> not_found
+ end, State);
+
+handle_call(list_queue_states, _From, State = #ch{queue_states = QueueStates}) ->
+ %% For testing of cleanup only
+ %% HACK
+ {reply, maps:keys(element(2, QueueStates)), State};
+
+handle_call(_Request, _From, State) ->
+ noreply(State).
+
+handle_cast({method, Method, Content, Flow},
+ State = #ch{cfg = #conf{reader_pid = Reader},
+ interceptor_state = IState}) ->
+ case Flow of
+ %% We are going to process a message from the rabbit_reader
+ %% process, so here we ack it. In this case we are accessing
+ %% the rabbit_channel process dictionary.
+ flow -> credit_flow:ack(Reader);
+ noflow -> ok
+ end,
+ try handle_method(rabbit_channel_interceptor:intercept_in(
+ expand_shortcuts(Method, State), Content, IState),
+ State) of
+ {reply, Reply, NewState} ->
+ ok = send(Reply, NewState),
+ noreply(NewState);
+ {noreply, NewState} ->
+ noreply(NewState);
+ stop ->
+ {stop, normal, State}
+ catch
+ exit:Reason = #amqp_error{} ->
+ MethodName = rabbit_misc:method_record_type(Method),
+ handle_exception(Reason#amqp_error{method = MethodName}, State);
+ _:Reason:Stacktrace ->
+ {stop, {Reason, Stacktrace}, State}
+ end;
+
+handle_cast(ready_for_close,
+ State = #ch{cfg = #conf{state = closing,
+ writer_pid = WriterPid}}) ->
+ ok = rabbit_writer:send_command_sync(WriterPid, #'channel.close_ok'{}),
+ {stop, normal, State};
+
+handle_cast(terminate, State = #ch{cfg = #conf{writer_pid = WriterPid}}) ->
+ ok = rabbit_writer:flush(WriterPid),
+ {stop, normal, State};
+
+handle_cast({command, #'basic.consume_ok'{consumer_tag = CTag} = Msg}, State) ->
+ ok = send(Msg, State),
+ noreply(consumer_monitor(CTag, State));
+
+handle_cast({command, Msg}, State) ->
+ ok = send(Msg, State),
+ noreply(State);
+
+handle_cast({deliver, _CTag, _AckReq, _Msg},
+ State = #ch{cfg = #conf{state = closing}}) ->
+ noreply(State);
+handle_cast({deliver, ConsumerTag, AckRequired, Msg}, State) ->
+ % TODO: handle as action
+ noreply(handle_deliver(ConsumerTag, AckRequired, Msg, State));
+
+handle_cast({deliver_reply, _K, _Del},
+ State = #ch{cfg = #conf{state = closing}}) ->
+ noreply(State);
+handle_cast({deliver_reply, _K, _Del}, State = #ch{reply_consumer = none}) ->
+ noreply(State);
+handle_cast({deliver_reply, Key, #delivery{message =
+ #basic_message{exchange_name = ExchangeName,
+ routing_keys = [RoutingKey | _CcRoutes],
+ content = Content}}},
+ State = #ch{cfg = #conf{writer_pid = WriterPid},
+ next_tag = DeliveryTag,
+ reply_consumer = {ConsumerTag, _Suffix, Key}}) ->
+ ok = rabbit_writer:send_command(
+ WriterPid,
+ #'basic.deliver'{consumer_tag = ConsumerTag,
+ delivery_tag = DeliveryTag,
+ redelivered = false,
+ exchange = ExchangeName#resource.name,
+ routing_key = RoutingKey},
+ Content),
+ noreply(State);
+handle_cast({deliver_reply, _K1, _}, State=#ch{reply_consumer = {_, _, _K2}}) ->
+ noreply(State);
+
+handle_cast({send_credit_reply, Len},
+ State = #ch{cfg = #conf{writer_pid = WriterPid}}) ->
+ ok = rabbit_writer:send_command(
+ WriterPid, #'basic.credit_ok'{available = Len}),
+ noreply(State);
+
+handle_cast({send_drained, CTagCredit},
+ State = #ch{cfg = #conf{writer_pid = WriterPid}}) ->
+ [ok = rabbit_writer:send_command(
+ WriterPid, #'basic.credit_drained'{consumer_tag = ConsumerTag,
+ credit_drained = CreditDrained})
+ || {ConsumerTag, CreditDrained} <- CTagCredit],
+ noreply(State);
+
+% Note: https://www.pivotaltracker.com/story/show/166962656
+% This event is necessary for the stats timer to be initialized with
+% the correct values once the management agent has started
+handle_cast({force_event_refresh, Ref}, State) ->
+ rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State),
+ Ref),
+ noreply(rabbit_event:init_stats_timer(State, #ch.stats_timer));
+
+handle_cast({mandatory_received, _MsgSeqNo}, State) ->
+ %% This feature was used by `rabbit_amqqueue_process` and
+ %% `rabbit_mirror_queue_slave` up to and including RabbitMQ 3.7.x.
+ %% It is unused in 3.8.x and thus deprecated. We keep it to support
+ %% in-place upgrades to 3.8.x (i.e. mixed-version clusters), but it
+ %% is a no-op starting with that version.
+ %%
+ %% NB: don't call noreply/1 since we don't want to send confirms.
+ noreply_coalesce(State);
+
+handle_cast({reject_publish, _MsgSeqNo, QPid} = Evt, State) ->
+ %% For backwards compatibility
+ QRef = find_queue_name_from_pid(QPid, State#ch.queue_states),
+ case QRef of
+ undefined ->
+ %% ignore if no queue could be found for the given pid
+ noreply(State);
+ _ ->
+ handle_cast({queue_event, QRef, Evt}, State)
+ end;
+
+handle_cast({confirm, _MsgSeqNo, QPid} = Evt, State) ->
+ %% For backwards compatibility
+ QRef = find_queue_name_from_pid(QPid, State#ch.queue_states),
+ case QRef of
+ undefined ->
+ %% ignore if no queue could be found for the given pid
+ noreply(State);
+ _ ->
+ handle_cast({queue_event, QRef, Evt}, State)
+ end;
+handle_cast({queue_event, QRef, Evt},
+ #ch{queue_states = QueueStates0} = State0) ->
+ case rabbit_queue_type:handle_event(QRef, Evt, QueueStates0) of
+ {ok, QState1, Actions} ->
+ State1 = State0#ch{queue_states = QState1},
+ State = handle_queue_actions(Actions, State1),
+ noreply_coalesce(State);
+ eol ->
+ State1 = handle_consuming_queue_down_or_eol(QRef, State0),
+ {ConfirmMXs, UC1} =
+ rabbit_confirms:remove_queue(QRef, State1#ch.unconfirmed),
+ %% Deleted queue is a special case.
+ %% Do not nack the "rejected" messages.
+ State2 = record_confirms(ConfirmMXs,
+ State1#ch{unconfirmed = UC1}),
+ erase_queue_stats(QRef),
+ noreply_coalesce(
+ State2#ch{queue_states = rabbit_queue_type:remove(QRef, QueueStates0)});
+ {protocol_error, Type, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(Type, Reason, ReasonArgs)
+ end.
+
+handle_info({ra_event, {Name, _} = From, Evt}, State) ->
+ %% For backwards compatibility
+ QRef = find_queue_name_from_quorum_name(Name, State#ch.queue_states),
+ handle_cast({queue_event, QRef, {From, Evt}}, State);
+
+handle_info({bump_credit, Msg}, State) ->
+ %% A rabbit_amqqueue_process is granting credit to our channel. If
+ %% our channel was being blocked by this process, and no other
+ %% process is blocking our channel, then this channel will be
+ %% unblocked. This means that any credit that was deferred will be
+ %% sent to rabbit_reader processs that might be blocked by this
+ %% particular channel.
+ credit_flow:handle_bump_msg(Msg),
+ noreply(State);
+
+handle_info(timeout, State) ->
+ noreply(State);
+
+handle_info(emit_stats, State) ->
+ emit_stats(State),
+ State1 = rabbit_event:reset_stats_timer(State, #ch.stats_timer),
+ %% NB: don't call noreply/1 since we don't want to kick off the
+ %% stats timer.
+ {noreply, send_confirms_and_nacks(State1), hibernate};
+
+handle_info({'DOWN', _MRef, process, QPid, Reason},
+ #ch{queue_states = QStates0,
+ queue_monitors = _QMons} = State0) ->
+ credit_flow:peer_down(QPid),
+ case rabbit_queue_type:handle_down(QPid, Reason, QStates0) of
+ {ok, QState1, Actions} ->
+ State1 = State0#ch{queue_states = QState1},
+ State = handle_queue_actions(Actions, State1),
+ noreply_coalesce(State);
+ {eol, QRef} ->
+ State1 = handle_consuming_queue_down_or_eol(QRef, State0),
+ {ConfirmMXs, UC1} =
+ rabbit_confirms:remove_queue(QRef, State1#ch.unconfirmed),
+ %% Deleted queue is a special case.
+ %% Do not nack the "rejected" messages.
+ State2 = record_confirms(ConfirmMXs,
+ State1#ch{unconfirmed = UC1}),
+ erase_queue_stats(QRef),
+ noreply_coalesce(
+ State2#ch{queue_states = rabbit_queue_type:remove(QRef, QStates0)})
+ end;
+
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State};
+
+handle_info({{Ref, Node}, LateAnswer},
+ State = #ch{cfg = #conf{channel = Channel}})
+ when is_reference(Ref) ->
+ rabbit_log_channel:warning("Channel ~p ignoring late answer ~p from ~p",
+ [Channel, LateAnswer, Node]),
+ noreply(State);
+
+handle_info(tick, State0 = #ch{queue_states = QueueStates0}) ->
+ case get(permission_cache_can_expire) of
+ true -> ok = clear_permission_cache();
+ _ -> ok
+ end,
+ case evaluate_consumer_timeout(State0#ch{queue_states = QueueStates0}) of
+ {noreply, State} ->
+ noreply(init_tick_timer(reset_tick_timer(State)));
+ Return ->
+ Return
+ end;
+handle_info({update_user_state, User}, State = #ch{cfg = Cfg}) ->
+ noreply(State#ch{cfg = Cfg#conf{user = User}}).
+
+
+handle_pre_hibernate(State0) ->
+ ok = clear_permission_cache(),
+ State = maybe_cancel_tick_timer(State0),
+ rabbit_event:if_enabled(
+ State, #ch.stats_timer,
+ fun () -> emit_stats(State,
+ [{idle_since,
+ os:system_time(milli_seconds)}])
+ end),
+ {hibernate, rabbit_event:stop_stats_timer(State, #ch.stats_timer)}.
+
+handle_post_hibernate(State0) ->
+ State = init_tick_timer(State0),
+ {noreply, State}.
+
+terminate(_Reason,
+ State = #ch{cfg = #conf{user = #user{username = Username}},
+ queue_states = QueueCtxs}) ->
+ _ = rabbit_queue_type:close(QueueCtxs),
+ {_Res, _State1} = notify_queues(State),
+ pg_local:leave(rabbit_channels, self()),
+ rabbit_event:if_enabled(State, #ch.stats_timer,
+ fun() -> emit_stats(State) end),
+ [delete_stats(Tag) || {Tag, _} <- get()],
+ rabbit_core_metrics:channel_closed(self()),
+ rabbit_event:notify(channel_closed, [{pid, self()},
+ {user_who_performed_action, Username}]).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+-spec get_max_message_size() -> non_neg_integer().
+
+get_max_message_size() ->
+ case application:get_env(rabbit, max_message_size) of
+ {ok, MS} when is_integer(MS) ->
+ erlang:min(MS, ?MAX_MSG_SIZE);
+ _ ->
+ ?MAX_MSG_SIZE
+ end.
+
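+%% consumer_timeout is an optional limit, in milliseconds, on how long
+%% a delivery may remain unacknowledged; it is checked periodically by
+%% evaluate_consumer_timeout/1 from the 'tick' handler above.
+%% 'undefined' disables the check.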
+get_consumer_timeout() ->
+ case application:get_env(rabbit, consumer_timeout) of
+ {ok, MS} when is_integer(MS) ->
+ MS;
+ _ ->
+ undefined
+ end.
+%%---------------------------------------------------------------------------
+
+reply(Reply, NewState) -> {reply, Reply, next_state(NewState), hibernate}.
+
+noreply(NewState) -> {noreply, next_state(NewState), hibernate}.
+
+next_state(State) -> ensure_stats_timer(send_confirms_and_nacks(State)).
+
+noreply_coalesce(State = #ch{confirmed = C, rejected = R}) ->
+ Timeout = case {C, R} of {[], []} -> hibernate; _ -> 0 end,
+ {noreply, ensure_stats_timer(State), Timeout}.
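+%% noreply_coalesce/1 delays sending confirms and nacks: while further
+%% messages are queued for this process they keep accumulating in
+%% #ch.confirmed / #ch.rejected, and the zero timeout guarantees a
+%% 'timeout' info message (handled above via noreply/1) that flushes
+%% them once the mailbox drains.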
+
+ensure_stats_timer(State) ->
+ rabbit_event:ensure_stats_timer(State, #ch.stats_timer, emit_stats).
+
+return_ok(State, true, _Msg) -> {noreply, State};
+return_ok(State, false, Msg) -> {reply, Msg, State}.
+
+ok_msg(true, _Msg) -> undefined;
+ok_msg(false, Msg) -> Msg.
+
+send(_Command, #ch{cfg = #conf{state = closing}}) ->
+ ok;
+send(Command, #ch{cfg = #conf{writer_pid = WriterPid}}) ->
+ ok = rabbit_writer:send_command(WriterPid, Command).
+
+format_soft_error(#amqp_error{name = N, explanation = E, method = M}) ->
+ io_lib:format("operation ~s caused a channel exception ~s: ~ts", [M, N, E]).
+
+handle_exception(Reason, State = #ch{cfg = #conf{protocol = Protocol,
+ channel = Channel,
+ writer_pid = WriterPid,
+ reader_pid = ReaderPid,
+ conn_pid = ConnPid,
+ conn_name = ConnName,
+ virtual_host = VHost,
+ user = User
+ }}) ->
+ %% something bad's happened: notify_queues may not be 'ok'
+ {_Result, State1} = notify_queues(State),
+ case rabbit_binary_generator:map_exception(Channel, Reason, Protocol) of
+ {Channel, CloseMethod} ->
+ rabbit_log_channel:error(
+ "Channel error on connection ~p (~s, vhost: '~s',"
+ " user: '~s'), channel ~p:~n~s~n",
+ [ConnPid, ConnName, VHost, User#user.username,
+ Channel, format_soft_error(Reason)]),
+ ok = rabbit_writer:send_command(WriterPid, CloseMethod),
+ {noreply, State1};
+ {0, _} ->
+ ReaderPid ! {channel_exit, Channel, Reason},
+ {stop, normal, State1}
+ end.
+
+-spec precondition_failed(string()) -> no_return().
+
+precondition_failed(Format) -> precondition_failed(Format, []).
+
+-spec precondition_failed(string(), [any()]) -> no_return().
+
+precondition_failed(Format, Params) ->
+ rabbit_misc:protocol_error(precondition_failed, Format, Params).
+
+return_queue_declare_ok(#resource{name = ActualName},
+ NoWait, MessageCount, ConsumerCount,
+ #ch{cfg = Cfg} = State) ->
+ return_ok(State#ch{cfg = Cfg#conf{most_recently_declared_queue = ActualName}},
+ NoWait, #'queue.declare_ok'{queue = ActualName,
+ message_count = MessageCount,
+ consumer_count = ConsumerCount}).
+
+check_resource_access(User, Resource, Perm, Context) ->
+ V = {Resource, Context, Perm},
+
+ Cache = case get(permission_cache) of
+ undefined -> [];
+ Other -> Other
+ end,
+ case lists:member(V, Cache) of
+ true -> ok;
+ false -> ok = rabbit_access_control:check_resource_access(
+ User, Resource, Perm, Context),
+ CacheTail = lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE-1),
+ put(permission_cache, [V | CacheTail])
+ end.
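+%% The permission cache is a small per-channel list kept in the process
+%% dictionary, capped at ?MAX_PERMISSION_CACHE_SIZE entries: the newest
+%% entry is prepended and the oldest one is dropped via
+%% lists:sublist/2. check_topic_authorisation/5 below maintains an
+%% analogous 'topic_permission_cache'.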
+
+clear_permission_cache() -> erase(permission_cache),
+ erase(topic_permission_cache),
+ ok.
+
+check_configure_permitted(Resource, User, Context) ->
+ check_resource_access(User, Resource, configure, Context).
+
+check_write_permitted(Resource, User, Context) ->
+ check_resource_access(User, Resource, write, Context).
+
+check_read_permitted(Resource, User, Context) ->
+ check_resource_access(User, Resource, read, Context).
+
+check_write_permitted_on_topic(Resource, User, RoutingKey, AuthzContext) ->
+ check_topic_authorisation(Resource, User, RoutingKey, AuthzContext, write).
+
+check_read_permitted_on_topic(Resource, User, RoutingKey, AuthzContext) ->
+ check_topic_authorisation(Resource, User, RoutingKey, AuthzContext, read).
+
+check_user_id_header(#'P_basic'{user_id = undefined}, _) ->
+ ok;
+check_user_id_header(#'P_basic'{user_id = Username},
+ #ch{cfg = #conf{user = #user{username = Username}}}) ->
+ ok;
+check_user_id_header(
+ #'P_basic'{}, #ch{cfg = #conf{user = #user{authz_backends =
+ [{rabbit_auth_backend_dummy, _}]}}}) ->
+ ok;
+check_user_id_header(#'P_basic'{user_id = Claimed},
+ #ch{cfg = #conf{user = #user{username = Actual,
+ tags = Tags}}}) ->
+ case lists:member(impersonator, Tags) of
+ true -> ok;
+ false -> precondition_failed(
+ "user_id property set to '~s' but authenticated user was "
+ "'~s'", [Claimed, Actual])
+ end.
+
+check_expiration_header(Props) ->
+ case rabbit_basic:parse_expiration(Props) of
+ {ok, _} -> ok;
+ {error, E} -> precondition_failed("invalid expiration '~s': ~p",
+ [Props#'P_basic'.expiration, E])
+ end.
+
+check_internal_exchange(#exchange{name = Name, internal = true}) ->
+ rabbit_misc:protocol_error(access_refused,
+ "cannot publish to internal ~s",
+ [rabbit_misc:rs(Name)]);
+check_internal_exchange(_) ->
+ ok.
+
+check_topic_authorisation(#exchange{name = Name = #resource{virtual_host = VHost}, type = topic},
+ User = #user{username = Username},
+ RoutingKey, AuthzContext, Permission) ->
+ Resource = Name#resource{kind = topic},
+ VariableMap = build_topic_variable_map(AuthzContext, VHost, Username),
+ Context = #{routing_key => RoutingKey,
+ variable_map => VariableMap},
+ Cache = case get(topic_permission_cache) of
+ undefined -> [];
+ Other -> Other
+ end,
+ case lists:member({Resource, Context, Permission}, Cache) of
+ true -> ok;
+ false -> ok = rabbit_access_control:check_topic_access(
+ User, Resource, Permission, Context),
+ CacheTail = lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE-1),
+ put(topic_permission_cache, [{Resource, Context, Permission} | CacheTail])
+ end;
+check_topic_authorisation(_, _, _, _, _) ->
+ ok.
+
+
+build_topic_variable_map(AuthzContext, VHost, Username) when is_map(AuthzContext) ->
+ maps:merge(AuthzContext, #{<<"vhost">> => VHost, <<"username">> => Username});
+build_topic_variable_map(AuthzContext, VHost, Username) ->
+ maps:merge(extract_variable_map_from_amqp_params(AuthzContext), #{<<"vhost">> => VHost, <<"username">> => Username}).
+
+%% Use tuple representation of amqp_params to avoid a dependency on amqp_client.
+%% Extracts variable map only from amqp_params_direct, not amqp_params_network.
+%% amqp_params_direct records are usually used by plugins (e.g. MQTT, STOMP)
+extract_variable_map_from_amqp_params({amqp_params, {amqp_params_direct, _, _, _, _,
+ {amqp_adapter_info, _,_,_,_,_,_,AdditionalInfo}, _}}) ->
+ proplists:get_value(variable_map, AdditionalInfo, #{});
+extract_variable_map_from_amqp_params({amqp_params_direct, _, _, _, _,
+ {amqp_adapter_info, _,_,_,_,_,_,AdditionalInfo}, _}) ->
+ proplists:get_value(variable_map, AdditionalInfo, #{});
+extract_variable_map_from_amqp_params([Value]) ->
+ extract_variable_map_from_amqp_params(Value);
+extract_variable_map_from_amqp_params(_) ->
+ #{}.
+
+check_msg_size(Content, MaxMessageSize, GCThreshold) ->
+ Size = rabbit_basic:maybe_gc_large_msg(Content, GCThreshold),
+ case Size of
+ S when S > MaxMessageSize ->
+ ErrorMessage = case MaxMessageSize of
+ ?MAX_MSG_SIZE ->
+ "message size ~B is larger than max size ~B";
+ _ ->
+ "message size ~B is larger than configured max size ~B"
+ end,
+ precondition_failed(ErrorMessage,
+ [Size, MaxMessageSize]);
+ _ -> ok
+ end.
+
+check_vhost_queue_limit(#resource{name = QueueName}, VHost) ->
+ case rabbit_vhost_limit:is_over_queue_limit(VHost) of
+ false -> ok;
+ {true, Limit} -> precondition_failed("cannot declare queue '~s': "
+ "queue limit in vhost '~s' (~p) is reached",
+ [QueueName, VHost, Limit])
+
+ end.
+
+qbin_to_resource(QueueNameBin, VHostPath) ->
+ name_to_resource(queue, QueueNameBin, VHostPath).
+
+name_to_resource(Type, NameBin, VHostPath) ->
+ rabbit_misc:r(VHostPath, Type, NameBin).
+
+expand_queue_name_shortcut(<<>>, #ch{cfg = #conf{most_recently_declared_queue = <<>>}}) ->
+ rabbit_misc:protocol_error(not_found, "no previously declared queue", []);
+expand_queue_name_shortcut(<<>>, #ch{cfg = #conf{most_recently_declared_queue = MRDQ}}) ->
+ MRDQ;
+expand_queue_name_shortcut(QueueNameBin, _) ->
+ QueueNameBin.
+
+expand_routing_key_shortcut(<<>>, <<>>,
+ #ch{cfg = #conf{most_recently_declared_queue = <<>>}}) ->
+ rabbit_misc:protocol_error(not_found, "no previously declared queue", []);
+expand_routing_key_shortcut(<<>>, <<>>,
+ #ch{cfg = #conf{most_recently_declared_queue = MRDQ}}) ->
+ MRDQ;
+expand_routing_key_shortcut(_QueueNameBin, RoutingKey, _State) ->
+ RoutingKey.
+
+expand_shortcuts(#'basic.get' {queue = Q} = M, State) ->
+ M#'basic.get' {queue = expand_queue_name_shortcut(Q, State)};
+expand_shortcuts(#'basic.consume'{queue = Q} = M, State) ->
+ M#'basic.consume'{queue = expand_queue_name_shortcut(Q, State)};
+expand_shortcuts(#'queue.delete' {queue = Q} = M, State) ->
+ M#'queue.delete' {queue = expand_queue_name_shortcut(Q, State)};
+expand_shortcuts(#'queue.purge' {queue = Q} = M, State) ->
+ M#'queue.purge' {queue = expand_queue_name_shortcut(Q, State)};
+expand_shortcuts(#'queue.bind' {queue = Q, routing_key = K} = M, State) ->
+ M#'queue.bind' {queue = expand_queue_name_shortcut(Q, State),
+ routing_key = expand_routing_key_shortcut(Q, K, State)};
+expand_shortcuts(#'queue.unbind' {queue = Q, routing_key = K} = M, State) ->
+ M#'queue.unbind' {queue = expand_queue_name_shortcut(Q, State),
+ routing_key = expand_routing_key_shortcut(Q, K, State)};
+expand_shortcuts(M, _State) ->
+ M.
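+%% These shortcuts implement the AMQP 0-9-1 convention that an empty
+%% queue name (and, for bind/unbind, an empty routing key together with
+%% an empty queue name) refers to the queue most recently declared on
+%% this channel; using the shortcut before any queue.declare fails with
+%% not_found.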
+
+check_not_default_exchange(#resource{kind = exchange, name = <<"">>}) ->
+ rabbit_misc:protocol_error(
+ access_refused, "operation not permitted on the default exchange", []);
+check_not_default_exchange(_) ->
+ ok.
+
+check_exchange_deletion(XName = #resource{name = <<"amq.", _/binary>>,
+ kind = exchange}) ->
+ rabbit_misc:protocol_error(
+ access_refused, "deletion of system ~s not allowed",
+ [rabbit_misc:rs(XName)]);
+check_exchange_deletion(_) ->
+ ok.
+
+%% check that an exchange/queue name does not contain the reserved
+%% "amq." prefix.
+%%
+%% As per the AMQP 0-9-1 spec, the exclusion of "amq." prefixed names
+%% only applies on actual creation, and not in the cases where the
+%% entity already exists or passive=true.
+%%
+%% NB: We deliberately do not enforce the other constraints on names
+%% required by the spec.
+check_name(Kind, NameBin = <<"amq.", _/binary>>) ->
+ rabbit_misc:protocol_error(
+ access_refused,
+ "~s name '~s' contains reserved prefix 'amq.*'",[Kind, NameBin]);
+check_name(_Kind, NameBin) ->
+ NameBin.
+
+strip_cr_lf(NameBin) ->
+ binary:replace(NameBin, [<<"\n">>, <<"\r">>], <<"">>, [global]).
+
+
+maybe_set_fast_reply_to(
+ C = #content{properties = P = #'P_basic'{reply_to =
+ <<"amq.rabbitmq.reply-to">>}},
+ #ch{reply_consumer = ReplyConsumer}) ->
+ case ReplyConsumer of
+ none -> rabbit_misc:protocol_error(
+ precondition_failed,
+ "fast reply consumer does not exist", []);
+ {_, Suf, _K} -> Rep = <<"amq.rabbitmq.reply-to.", Suf/binary>>,
+ rabbit_binary_generator:clear_encoded_content(
+ C#content{properties = P#'P_basic'{reply_to = Rep}})
+ end;
+maybe_set_fast_reply_to(C, _State) ->
+ C.
+
+record_rejects([], State) ->
+ State;
+record_rejects(MXs, State = #ch{rejected = R, tx = Tx}) ->
+ Tx1 = case Tx of
+ none -> none;
+ _ -> failed
+ end,
+ State#ch{rejected = [MXs | R], tx = Tx1}.
+
+record_confirms([], State) ->
+ State;
+record_confirms(MXs, State = #ch{confirmed = C}) ->
+ State#ch{confirmed = [MXs | C]}.
+
+handle_method({Method, Content}, State) ->
+ handle_method(Method, Content, State).
+
+handle_method(#'channel.open'{}, _,
+ State = #ch{cfg = #conf{state = starting} = Cfg}) ->
+ %% Don't leave "starting" as the state for 5s. TODO is this TRTTD?
+ State1 = State#ch{cfg = Cfg#conf{state = running}},
+ rabbit_event:if_enabled(State1, #ch.stats_timer,
+ fun() -> emit_stats(State1) end),
+ {reply, #'channel.open_ok'{}, State1};
+
+handle_method(#'channel.open'{}, _, _State) ->
+ rabbit_misc:protocol_error(
+ channel_error, "second 'channel.open' seen", []);
+
+handle_method(_Method, _, #ch{cfg = #conf{state = starting}}) ->
+ rabbit_misc:protocol_error(channel_error, "expected 'channel.open'", []);
+
+handle_method(#'channel.close_ok'{}, _, #ch{cfg = #conf{state = closing}}) ->
+ stop;
+
+handle_method(#'channel.close'{}, _,
+ State = #ch{cfg = #conf{state = closing,
+ writer_pid = WriterPid}}) ->
+ ok = rabbit_writer:send_command(WriterPid, #'channel.close_ok'{}),
+ {noreply, State};
+
+handle_method(_Method, _, State = #ch{cfg = #conf{state = closing}}) ->
+ {noreply, State};
+
+handle_method(#'channel.close'{}, _,
+ State = #ch{cfg = #conf{reader_pid = ReaderPid}}) ->
+ {_Result, State1} = notify_queues(State),
+ %% We issue the channel.close_ok response after a handshake with
+ %% the reader, the other half of which is ready_for_close. That
+ %% way the reader forgets about the channel before we send the
+ %% response (and this channel process terminates). If we didn't do
+ %% that, a channel.open for the same channel number, which a
+ %% client is entitled to send as soon as it has received the
+ %% close_ok, might be received by the reader before it has seen
+ %% the termination and hence be sent to the old, now dead/dying
+ %% channel process, instead of a new process, and thus lost.
+ ReaderPid ! {channel_closing, self()},
+ {noreply, State1};
+
+%% Even though the spec prohibits the client from sending commands
+%% while waiting for the reply to a synchronous command, we generally
+%% do allow this...except in the case of a pending tx.commit, where
+%% it could wreak havoc.
+handle_method(_Method, _, #ch{tx = Tx})
+ when Tx =:= committing orelse Tx =:= failed ->
+ rabbit_misc:protocol_error(
+ channel_error, "unexpected command while processing 'tx.commit'", []);
+
+handle_method(#'access.request'{},_, State) ->
+ {reply, #'access.request_ok'{ticket = 1}, State};
+
+handle_method(#'basic.publish'{immediate = true}, _Content, _State) ->
+ rabbit_misc:protocol_error(not_implemented, "immediate=true", []);
+
+handle_method(#'basic.publish'{exchange = ExchangeNameBin,
+ routing_key = RoutingKey,
+ mandatory = Mandatory},
+ Content, State = #ch{cfg = #conf{channel = ChannelNum,
+ conn_name = ConnName,
+ virtual_host = VHostPath,
+ user = #user{username = Username} = User,
+ trace_state = TraceState,
+ max_message_size = MaxMessageSize,
+ authz_context = AuthzContext,
+ writer_gc_threshold = GCThreshold
+ },
+ tx = Tx,
+ confirm_enabled = ConfirmEnabled,
+ delivery_flow = Flow
+ }) ->
+ check_msg_size(Content, MaxMessageSize, GCThreshold),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
+ check_write_permitted(ExchangeName, User, AuthzContext),
+ Exchange = rabbit_exchange:lookup_or_die(ExchangeName),
+ check_internal_exchange(Exchange),
+ check_write_permitted_on_topic(Exchange, User, RoutingKey, AuthzContext),
+ %% We decode the content's properties here because we're almost
+ %% certain to want to look at delivery-mode and priority.
+ DecodedContent = #content {properties = Props} =
+ maybe_set_fast_reply_to(
+ rabbit_binary_parser:ensure_content_decoded(Content), State),
+ check_user_id_header(Props, State),
+ check_expiration_header(Props),
+ DoConfirm = Tx =/= none orelse ConfirmEnabled,
+ {MsgSeqNo, State1} =
+ case DoConfirm orelse Mandatory of
+ false -> {undefined, State};
+ true -> SeqNo = State#ch.publish_seqno,
+ {SeqNo, State#ch{publish_seqno = SeqNo + 1}}
+ end,
+ case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of
+ {ok, Message} ->
+ Delivery = rabbit_basic:delivery(
+ Mandatory, DoConfirm, Message, MsgSeqNo),
+ QNames = rabbit_exchange:route(Exchange, Delivery),
+ rabbit_trace:tap_in(Message, QNames, ConnName, ChannelNum,
+ Username, TraceState),
+ DQ = {Delivery#delivery{flow = Flow}, QNames},
+ {noreply, case Tx of
+ none -> deliver_to_queues(DQ, State1);
+ {Msgs, Acks} -> Msgs1 = ?QUEUE:in(DQ, Msgs),
+ State1#ch{tx = {Msgs1, Acks}}
+ end};
+ {error, Reason} ->
+ precondition_failed("invalid message: ~p", [Reason])
+ end;
+
+handle_method(#'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = Multiple,
+ requeue = Requeue}, _, State) ->
+ reject(DeliveryTag, Requeue, Multiple, State);
+
+handle_method(#'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = Multiple},
+ _, State = #ch{unacked_message_q = UAMQ, tx = Tx}) ->
+ {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple),
+ State1 = State#ch{unacked_message_q = Remaining},
+ {noreply, case Tx of
+ none -> {State2, Actions} = ack(Acked, State1),
+ handle_queue_actions(Actions, State2);
+ {Msgs, Acks} -> Acks1 = ack_cons(ack, Acked, Acks),
+ State1#ch{tx = {Msgs, Acks1}}
+ end};
+
+handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck},
+ _, State = #ch{cfg = #conf{writer_pid = WriterPid,
+ conn_pid = ConnPid,
+ user = User,
+ virtual_host = VHostPath,
+ authz_context = AuthzContext
+ },
+ limiter = Limiter,
+ next_tag = DeliveryTag,
+ queue_states = QueueStates0}) ->
+ QueueName = qbin_to_resource(QueueNameBin, VHostPath),
+ check_read_permitted(QueueName, User, AuthzContext),
+ case rabbit_amqqueue:with_exclusive_access_or_die(
+ QueueName, ConnPid,
+ %% Use the delivery tag as consumer tag for quorum queues
+ fun (Q) ->
+ rabbit_queue_type:dequeue(
+ Q, NoAck, rabbit_limiter:pid(Limiter),
+ DeliveryTag, QueueStates0)
+ end) of
+ {ok, MessageCount, Msg, QueueStates} ->
+ handle_basic_get(WriterPid, DeliveryTag, NoAck, MessageCount, Msg,
+ State#ch{queue_states = QueueStates});
+ {empty, QueueStates} ->
+ ?INCR_STATS(queue_stats, QueueName, 1, get_empty, State),
+ {reply, #'basic.get_empty'{}, State#ch{queue_states = QueueStates}};
+ empty ->
+ ?INCR_STATS(queue_stats, QueueName, 1, get_empty, State),
+ {reply, #'basic.get_empty'{}, State};
+ {error, {unsupported, single_active_consumer}} ->
+ rabbit_misc:protocol_error(
+ resource_locked,
+ "cannot obtain access to locked ~s. basic.get operations "
+ "are not supported by quorum queues with single active consumer",
+ [rabbit_misc:rs(QueueName)]);
+ {error, Reason} ->
+ %% TODO add queue type to error message
+ rabbit_misc:protocol_error(internal_error,
+ "Cannot get a message from queue '~s': ~p",
+ [rabbit_misc:rs(QueueName), Reason]);
+ {protocol_error, Type, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(Type, Reason, ReasonArgs)
+ end;
+
+handle_method(#'basic.consume'{queue = <<"amq.rabbitmq.reply-to">>,
+ consumer_tag = CTag0,
+ no_ack = NoAck,
+ nowait = NoWait},
+ _, State = #ch{reply_consumer = ReplyConsumer,
+ consumer_mapping = ConsumerMapping}) ->
+ case maps:find(CTag0, ConsumerMapping) of
+ error ->
+ case {ReplyConsumer, NoAck} of
+ {none, true} ->
+ CTag = case CTag0 of
+ <<>> -> rabbit_guid:binary(
+ rabbit_guid:gen_secure(), "amq.ctag");
+ Other -> Other
+ end,
+ %% Precalculate both suffix and key; base64 encoding is
+ %% expensive
+ Key = base64:encode(rabbit_guid:gen_secure()),
+ PidEnc = base64:encode(term_to_binary(self())),
+ Suffix = <<PidEnc/binary, ".", Key/binary>>,
+ Consumer = {CTag, Suffix, binary_to_list(Key)},
+ State1 = State#ch{reply_consumer = Consumer},
+ case NoWait of
+ true -> {noreply, State1};
+ false -> Rep = #'basic.consume_ok'{consumer_tag = CTag},
+ {reply, Rep, State1}
+ end;
+ {_, false} ->
+ rabbit_misc:protocol_error(
+ precondition_failed,
+ "reply consumer cannot acknowledge", []);
+ _ ->
+ rabbit_misc:protocol_error(
+ precondition_failed, "reply consumer already set", [])
+ end;
+ {ok, _} ->
+ %% Attempted reuse of consumer tag.
+ rabbit_misc:protocol_error(
+ not_allowed, "attempt to reuse consumer tag '~s'", [CTag0])
+ end;
+
+handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait},
+ _, State = #ch{reply_consumer = {ConsumerTag, _, _}}) ->
+ State1 = State#ch{reply_consumer = none},
+ case NoWait of
+ true -> {noreply, State1};
+ false -> Rep = #'basic.cancel_ok'{consumer_tag = ConsumerTag},
+ {reply, Rep, State1}
+ end;
+
+handle_method(#'basic.consume'{queue = QueueNameBin,
+ consumer_tag = ConsumerTag,
+ no_local = _, % FIXME: implement
+ no_ack = NoAck,
+ exclusive = ExclusiveConsume,
+ nowait = NoWait,
+ arguments = Args},
+ _, State = #ch{cfg = #conf{consumer_prefetch = ConsumerPrefetch,
+ user = User,
+ virtual_host = VHostPath,
+ authz_context = AuthzContext},
+ consumer_mapping = ConsumerMapping
+ }) ->
+ case maps:find(ConsumerTag, ConsumerMapping) of
+ error ->
+ QueueName = qbin_to_resource(QueueNameBin, VHostPath),
+ check_read_permitted(QueueName, User, AuthzContext),
+ ActualConsumerTag =
+ case ConsumerTag of
+ <<>> -> rabbit_guid:binary(rabbit_guid:gen_secure(),
+ "amq.ctag");
+ Other -> Other
+ end,
+ case basic_consume(
+ QueueName, NoAck, ConsumerPrefetch, ActualConsumerTag,
+ ExclusiveConsume, Args, NoWait, State) of
+ {ok, State1} ->
+ {noreply, State1};
+ {error, exclusive_consume_unavailable} ->
+ rabbit_misc:protocol_error(
+ access_refused, "~s in exclusive use",
+ [rabbit_misc:rs(QueueName)]);
+ {error, global_qos_not_supported_for_queue_type} ->
+ rabbit_misc:protocol_error(
+ not_implemented, "~s does not support global qos",
+ [rabbit_misc:rs(QueueName)])
+ end;
+ {ok, _} ->
+ %% Attempted reuse of consumer tag.
+ rabbit_misc:protocol_error(
+ not_allowed, "attempt to reuse consumer tag '~s'", [ConsumerTag])
+ end;
+
+handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait},
+ _, State = #ch{cfg = #conf{user = #user{username = Username}},
+ consumer_mapping = ConsumerMapping,
+ queue_consumers = QCons,
+ queue_states = QueueStates0}) ->
+ OkMsg = #'basic.cancel_ok'{consumer_tag = ConsumerTag},
+ case maps:find(ConsumerTag, ConsumerMapping) of
+ error ->
+ %% Spec requires we ignore this situation.
+ return_ok(State, NoWait, OkMsg);
+ {ok, {Q, _CParams}} when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+
+ ConsumerMapping1 = maps:remove(ConsumerTag, ConsumerMapping),
+ QCons1 =
+ case maps:find(QName, QCons) of
+ error -> QCons;
+ {ok, CTags} -> CTags1 = gb_sets:delete(ConsumerTag, CTags),
+ case gb_sets:is_empty(CTags1) of
+ true -> maps:remove(QName, QCons);
+ false -> maps:put(QName, CTags1, QCons)
+ end
+ end,
+ NewState = State#ch{consumer_mapping = ConsumerMapping1,
+ queue_consumers = QCons1},
+ %% In order to ensure that no more messages are sent to
+ %% the consumer after the cancel_ok has been sent, we get
+ %% the queue process to send the cancel_ok on our
+ %% behalf. If we were sending the cancel_ok ourselves it
+ %% might overtake a message sent previously by the queue.
+ case rabbit_misc:with_exit_handler(
+ fun () -> {error, not_found} end,
+ fun () ->
+ rabbit_queue_type:cancel(
+ Q, ConsumerTag, ok_msg(NoWait, OkMsg),
+ Username, QueueStates0)
+ end) of
+ {ok, QueueStates} ->
+ {noreply, NewState#ch{queue_states = QueueStates}};
+ {error, not_found} ->
+ %% Spec requires we ignore this situation.
+ return_ok(NewState, NoWait, OkMsg)
+ end
+ end;
+
+handle_method(#'basic.qos'{prefetch_size = Size}, _, _State) when Size /= 0 ->
+ rabbit_misc:protocol_error(not_implemented,
+ "prefetch_size!=0 (~w)", [Size]);
+
+handle_method(#'basic.qos'{global = false,
+ prefetch_count = PrefetchCount},
+ _, State = #ch{cfg = Cfg,
+ limiter = Limiter}) ->
+ %% Ensures that if default was set, it's overridden
+ Limiter1 = rabbit_limiter:unlimit_prefetch(Limiter),
+ {reply, #'basic.qos_ok'{}, State#ch{cfg = Cfg#conf{consumer_prefetch = PrefetchCount},
+ limiter = Limiter1}};
+
+handle_method(#'basic.qos'{global = true,
+ prefetch_count = 0},
+ _, State = #ch{limiter = Limiter}) ->
+ Limiter1 = rabbit_limiter:unlimit_prefetch(Limiter),
+ {reply, #'basic.qos_ok'{}, State#ch{limiter = Limiter1}};
+
+handle_method(#'basic.qos'{global = true,
+ prefetch_count = PrefetchCount},
+ _, State = #ch{limiter = Limiter, unacked_message_q = UAMQ}) ->
+ %% TODO ?QUEUE:len(UAMQ) is not strictly right since that counts
+ %% unacked messages from basic.get too. Pretty obscure though.
+ Limiter1 = rabbit_limiter:limit_prefetch(Limiter,
+ PrefetchCount, ?QUEUE:len(UAMQ)),
+ case ((not rabbit_limiter:is_active(Limiter)) andalso
+ rabbit_limiter:is_active(Limiter1)) of
+ true -> rabbit_amqqueue:activate_limit_all(
+ classic_consumer_queue_pids(State#ch.consumer_mapping), self());
+ false -> ok
+ end,
+ {reply, #'basic.qos_ok'{}, State#ch{limiter = Limiter1}};
+
+handle_method(#'basic.recover_async'{requeue = true},
+ _, State = #ch{unacked_message_q = UAMQ,
+ limiter = Limiter,
+ queue_states = QueueStates0}) ->
+ OkFun = fun () -> ok end,
+ UAMQL = ?QUEUE:to_list(UAMQ),
+ {QueueStates, Actions} =
+ foreach_per_queue(
+ fun ({QPid, CTag}, MsgIds, {Acc0, Actions0}) ->
+ rabbit_misc:with_exit_handler(
+ OkFun,
+ fun () ->
+ {ok, Acc, Act} = rabbit_amqqueue:requeue(QPid, {CTag, MsgIds}, Acc0),
+ {Acc, Act ++ Actions0}
+ end)
+ end, lists:reverse(UAMQL), {QueueStates0, []}),
+ ok = notify_limiter(Limiter, UAMQL),
+ State1 = handle_queue_actions(Actions, State#ch{unacked_message_q = ?QUEUE:new(),
+ queue_states = QueueStates}),
+ %% No answer required - basic.recover is the newer, synchronous
+ %% variant of this method
+ {noreply, State1};
+
+handle_method(#'basic.recover_async'{requeue = false}, _, _State) ->
+ rabbit_misc:protocol_error(not_implemented, "requeue=false", []);
+
+handle_method(#'basic.recover'{requeue = Requeue}, Content, State) ->
+ {noreply, State1} = handle_method(#'basic.recover_async'{requeue = Requeue},
+ Content, State),
+ {reply, #'basic.recover_ok'{}, State1};
+
+handle_method(#'basic.reject'{delivery_tag = DeliveryTag, requeue = Requeue},
+ _, State) ->
+ reject(DeliveryTag, Requeue, false, State);
+
+handle_method(#'exchange.declare'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{virtual_host = VHostPath,
+ user = User,
+ queue_collector_pid = CollectorPid,
+ conn_pid = ConnPid,
+ authz_context = AuthzContext}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait, #'exchange.declare_ok'{});
+
+handle_method(#'exchange.delete'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ virtual_host = VHostPath,
+ queue_collector_pid = CollectorPid,
+ user = User}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait, #'exchange.delete_ok'{});
+
+handle_method(#'exchange.bind'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{virtual_host = VHostPath,
+ conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ queue_collector_pid = CollectorPid,
+ user = User}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait, #'exchange.bind_ok'{});
+
+handle_method(#'exchange.unbind'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{virtual_host = VHostPath,
+ conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ queue_collector_pid = CollectorPid,
+ user = User}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait, #'exchange.unbind_ok'{});
+
+handle_method(#'queue.declare'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{virtual_host = VHostPath,
+ conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ queue_collector_pid = CollectorPid,
+ user = User}}) ->
+ {ok, QueueName, MessageCount, ConsumerCount} =
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_queue_declare_ok(QueueName, NoWait, MessageCount,
+ ConsumerCount, State);
+
+handle_method(#'queue.delete'{nowait = NoWait} = Method, _,
+ State = #ch{cfg = #conf{conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ virtual_host = VHostPath,
+ queue_collector_pid = CollectorPid,
+ user = User}}) ->
+ {ok, PurgedMessageCount} =
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait,
+ #'queue.delete_ok'{message_count = PurgedMessageCount});
+
+handle_method(#'queue.bind'{nowait = NoWait} = Method, _,
+ State = #ch{cfg = #conf{conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ user = User,
+ queue_collector_pid = CollectorPid,
+ virtual_host = VHostPath}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait, #'queue.bind_ok'{});
+
+handle_method(#'queue.unbind'{} = Method, _,
+ State = #ch{cfg = #conf{conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ user = User,
+ queue_collector_pid = CollectorPid,
+ virtual_host = VHostPath}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, false, #'queue.unbind_ok'{});
+
+handle_method(#'queue.purge'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ user = User,
+ queue_collector_pid = CollectorPid,
+ virtual_host = VHostPath}}) ->
+ case handle_method(Method, ConnPid, AuthzContext, CollectorPid,
+ VHostPath, User) of
+ {ok, PurgedMessageCount} ->
+ return_ok(State, NoWait,
+ #'queue.purge_ok'{message_count = PurgedMessageCount})
+ end;
+
+handle_method(#'tx.select'{}, _, #ch{confirm_enabled = true}) ->
+ precondition_failed("cannot switch from confirm to tx mode");
+
+handle_method(#'tx.select'{}, _, State = #ch{tx = none}) ->
+ {reply, #'tx.select_ok'{}, State#ch{tx = new_tx()}};
+
+handle_method(#'tx.select'{}, _, State) ->
+ {reply, #'tx.select_ok'{}, State};
+
+handle_method(#'tx.commit'{}, _, #ch{tx = none}) ->
+ precondition_failed("channel is not transactional");
+
+handle_method(#'tx.commit'{}, _, State = #ch{tx = {Msgs, Acks},
+ limiter = Limiter}) ->
+ State1 = queue_fold(fun deliver_to_queues/2, State, Msgs),
+ Rev = fun (X) -> lists:reverse(lists:sort(X)) end,
+ {State2, Actions2} =
+ lists:foldl(fun ({ack, A}, {Acc, Actions}) ->
+ {Acc0, Actions0} = ack(Rev(A), Acc),
+ {Acc0, Actions ++ Actions0};
+ ({Requeue, A}, {Acc, Actions}) ->
+ {Acc0, Actions0} = internal_reject(Requeue, Rev(A), Limiter, Acc),
+ {Acc0, Actions ++ Actions0}
+ end, {State1, []}, lists:reverse(Acks)),
+ State3 = handle_queue_actions(Actions2, State2),
+ {noreply, maybe_complete_tx(State3#ch{tx = committing})};
+
+handle_method(#'tx.rollback'{}, _, #ch{tx = none}) ->
+ precondition_failed("channel is not transactional");
+
+handle_method(#'tx.rollback'{}, _, State = #ch{unacked_message_q = UAMQ,
+ tx = {_Msgs, Acks}}) ->
+ AcksL = lists:append(lists:reverse([lists:reverse(L) || {_, L} <- Acks])),
+ UAMQ1 = ?QUEUE:from_list(lists:usort(AcksL ++ ?QUEUE:to_list(UAMQ))),
+ {reply, #'tx.rollback_ok'{}, State#ch{unacked_message_q = UAMQ1,
+ tx = new_tx()}};
+
+handle_method(#'confirm.select'{}, _, #ch{tx = {_, _}}) ->
+ precondition_failed("cannot switch from tx to confirm mode");
+
+handle_method(#'confirm.select'{nowait = NoWait}, _, State) ->
+ return_ok(State#ch{confirm_enabled = true},
+ NoWait, #'confirm.select_ok'{});
+
+handle_method(#'channel.flow'{active = true}, _, State) ->
+ {reply, #'channel.flow_ok'{active = true}, State};
+
+handle_method(#'channel.flow'{active = false}, _, _State) ->
+ rabbit_misc:protocol_error(not_implemented, "active=false", []);
+
+handle_method(#'basic.credit'{consumer_tag = CTag,
+ credit = Credit,
+ drain = Drain},
+ _, State = #ch{consumer_mapping = Consumers,
+ queue_states = QStates0}) ->
+ case maps:find(CTag, Consumers) of
+ {ok, {Q, _CParams}} ->
+ {ok, QStates, Actions} = rabbit_queue_type:credit(Q, CTag, Credit, Drain, QStates0),
+ {noreply, handle_queue_actions(Actions, State#ch{queue_states = QStates})};
+ error -> precondition_failed(
+ "unknown consumer tag '~s'", [CTag])
+ end;
+
+handle_method(_MethodRecord, _Content, _State) ->
+ rabbit_misc:protocol_error(
+ command_invalid, "unimplemented method", []).
+
+%%----------------------------------------------------------------------------
+
+%% We get the queue process to send the consume_ok on our behalf. This
+%% is for symmetry with basic.cancel - see the comment in that method
+%% for why.
+basic_consume(QueueName, NoAck, ConsumerPrefetch, ActualConsumerTag,
+ ExclusiveConsume, Args, NoWait,
+ State = #ch{cfg = #conf{conn_pid = ConnPid,
+ user = #user{username = Username}},
+ limiter = Limiter,
+ consumer_mapping = ConsumerMapping,
+ queue_states = QueueStates0}) ->
+ case rabbit_amqqueue:with_exclusive_access_or_die(
+ QueueName, ConnPid,
+ fun (Q) ->
+ {rabbit_amqqueue:basic_consume(
+ Q, NoAck, self(),
+ rabbit_limiter:pid(Limiter),
+ rabbit_limiter:is_active(Limiter),
+ ConsumerPrefetch, ActualConsumerTag,
+ ExclusiveConsume, Args,
+ ok_msg(NoWait, #'basic.consume_ok'{
+ consumer_tag = ActualConsumerTag}),
+ Username, QueueStates0),
+ Q}
+ end) of
+ {{ok, QueueStates, Actions}, Q} when ?is_amqqueue(Q) ->
+ CM1 = maps:put(
+ ActualConsumerTag,
+ {Q, {NoAck, ConsumerPrefetch, ExclusiveConsume, Args}},
+ ConsumerMapping),
+
+ State1 = State#ch{consumer_mapping = CM1,
+ queue_states = QueueStates},
+ State2 = handle_queue_actions(Actions, State1),
+ {ok, case NoWait of
+ true -> consumer_monitor(ActualConsumerTag, State2);
+ false -> State2
+ end};
+ {{error, exclusive_consume_unavailable} = E, _Q} ->
+ E;
+ {{error, global_qos_not_supported_for_queue_type} = E, _Q} ->
+ E;
+ {{protocol_error, Type, Reason, ReasonArgs}, _Q} ->
+ rabbit_misc:protocol_error(Type, Reason, ReasonArgs)
+ end.
+
+maybe_stat(false, Q) -> rabbit_amqqueue:stat(Q);
+maybe_stat(true, _Q) -> {ok, 0, 0}.
+
+consumer_monitor(ConsumerTag,
+ State = #ch{consumer_mapping = ConsumerMapping,
+ queue_consumers = QCons}) ->
+ {Q, _} = maps:get(ConsumerTag, ConsumerMapping),
+ QRef = amqqueue:get_name(Q),
+ CTags1 = case maps:find(QRef, QCons) of
+ {ok, CTags} -> gb_sets:insert(ConsumerTag, CTags);
+ error -> gb_sets:singleton(ConsumerTag)
+ end,
+ QCons1 = maps:put(QRef, CTags1, QCons),
+ State#ch{queue_consumers = QCons1}.
+
+handle_consuming_queue_down_or_eol(QName,
+ State = #ch{queue_consumers = QCons}) ->
+ ConsumerTags = case maps:find(QName, QCons) of
+ error -> gb_sets:new();
+ {ok, CTags} -> CTags
+ end,
+ gb_sets:fold(
+ fun (CTag, StateN = #ch{consumer_mapping = CMap}) ->
+ case queue_down_consumer_action(CTag, CMap) of
+ remove ->
+ cancel_consumer(CTag, QName, StateN);
+ {recover, {NoAck, ConsumerPrefetch, Exclusive, Args}} ->
+ case catch basic_consume(
+ QName, NoAck, ConsumerPrefetch, CTag,
+ Exclusive, Args, true, StateN) of
+ {ok, StateN1} ->
+ StateN1;
+ _Err ->
+ cancel_consumer(CTag, QName, StateN)
+ end
+ end
+ end, State#ch{queue_consumers = maps:remove(QName, QCons)}, ConsumerTags).
+
+%% [0] There is a slight danger here: if a queue is deleted and then
+%% recreated, the re-consume will succeed even though it was not an HA
+%% failover. But the likelihood is small and most users are unlikely to
+%% care.
+
+cancel_consumer(CTag, QName,
+ State = #ch{cfg = #conf{capabilities = Capabilities},
+ consumer_mapping = CMap}) ->
+ case rabbit_misc:table_lookup(
+ Capabilities, <<"consumer_cancel_notify">>) of
+ {bool, true} -> ok = send(#'basic.cancel'{consumer_tag = CTag,
+ nowait = true}, State);
+ _ -> ok
+ end,
+ rabbit_event:notify(consumer_deleted, [{consumer_tag, CTag},
+ {channel, self()},
+ {queue, QName}]),
+ State#ch{consumer_mapping = maps:remove(CTag, CMap)}.
+
+queue_down_consumer_action(CTag, CMap) ->
+ {_, {_, _, _, Args} = ConsumeSpec} = maps:get(CTag, CMap),
+ case rabbit_misc:table_lookup(Args, <<"x-cancel-on-ha-failover">>) of
+ {bool, true} -> remove;
+ _ -> {recover, ConsumeSpec}
+ end.
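+%% Consumers declared with the x-cancel-on-ha-failover argument are
+%% removed when their queue goes down; all other consumers are
+%% re-established via basic_consume/8 (see [0] above for the caveat
+%% about recreated queues).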
+
+binding_action(Fun, SourceNameBin0, DestinationType, DestinationNameBin0,
+ RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext,
+ #user{username = Username} = User) ->
+ ExchangeNameBin = strip_cr_lf(SourceNameBin0),
+ DestinationNameBin = strip_cr_lf(DestinationNameBin0),
+ DestinationName = name_to_resource(DestinationType, DestinationNameBin, VHostPath),
+ check_write_permitted(DestinationName, User, AuthzContext),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
+ [check_not_default_exchange(N) || N <- [DestinationName, ExchangeName]],
+ check_read_permitted(ExchangeName, User, AuthzContext),
+ case rabbit_exchange:lookup(ExchangeName) of
+ {error, not_found} ->
+ ok;
+ {ok, Exchange} ->
+ check_read_permitted_on_topic(Exchange, User, RoutingKey, AuthzContext)
+ end,
+ case Fun(#binding{source = ExchangeName,
+ destination = DestinationName,
+ key = RoutingKey,
+ args = Arguments},
+ fun (_X, Q) when ?is_amqqueue(Q) ->
+ try rabbit_amqqueue:check_exclusive_access(Q, ConnPid)
+ catch exit:Reason -> {error, Reason}
+ end;
+ (_X, #exchange{}) ->
+ ok
+ end,
+ Username) of
+ {error, {resources_missing, [{not_found, Name} | _]}} ->
+ rabbit_amqqueue:not_found(Name);
+ {error, {resources_missing, [{absent, Q, Reason} | _]}} ->
+ rabbit_amqqueue:absent(Q, Reason);
+ {error, binding_not_found} ->
+ rabbit_misc:protocol_error(
+ not_found, "no binding ~s between ~s and ~s",
+ [RoutingKey, rabbit_misc:rs(ExchangeName),
+ rabbit_misc:rs(DestinationName)]);
+ {error, {binding_invalid, Fmt, Args}} ->
+ rabbit_misc:protocol_error(precondition_failed, Fmt, Args);
+ {error, #amqp_error{} = Error} ->
+ rabbit_misc:protocol_error(Error);
+ ok ->
+ ok
+ end.
+
+basic_return(#basic_message{exchange_name = ExchangeName,
+ routing_keys = [RoutingKey | _CcRoutes],
+ content = Content},
+ State = #ch{cfg = #conf{protocol = Protocol,
+ writer_pid = WriterPid}},
+ Reason) ->
+ ?INCR_STATS(exchange_stats, ExchangeName, 1, return_unroutable, State),
+ {_Close, ReplyCode, ReplyText} = Protocol:lookup_amqp_exception(Reason),
+ ok = rabbit_writer:send_command(
+ WriterPid,
+ #'basic.return'{reply_code = ReplyCode,
+ reply_text = ReplyText,
+ exchange = ExchangeName#resource.name,
+ routing_key = RoutingKey},
+ Content).
+
+reject(DeliveryTag, Requeue, Multiple,
+ State = #ch{unacked_message_q = UAMQ, tx = Tx}) ->
+ {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple),
+ State1 = State#ch{unacked_message_q = Remaining},
+ {noreply, case Tx of
+ none ->
+ {State2, Actions} = internal_reject(Requeue, Acked, State1#ch.limiter, State1),
+ handle_queue_actions(Actions, State2);
+ {Msgs, Acks} ->
+ Acks1 = ack_cons(Requeue, Acked, Acks),
+ State1#ch{tx = {Msgs, Acks1}}
+ end}.
+
+%% NB: Acked is in youngest-first order
+internal_reject(Requeue, Acked, Limiter,
+ State = #ch{queue_states = QueueStates0}) ->
+ {QueueStates, Actions} =
+ foreach_per_queue(
+ fun({QRef, CTag}, MsgIds, {Acc0, Actions0}) ->
+ Op = case Requeue of
+ false -> discard;
+ true -> requeue
+ end,
+ case rabbit_queue_type:settle(QRef, Op, CTag, MsgIds, Acc0) of
+ {ok, Acc, Actions} ->
+ {Acc, Actions0 ++ Actions};
+ {protocol_error, ErrorType, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(ErrorType, Reason, ReasonArgs)
+ end
+ end, Acked, {QueueStates0, []}),
+ ok = notify_limiter(Limiter, Acked),
+ {State#ch{queue_states = QueueStates}, Actions}.
+
+record_sent(Type, Tag, AckRequired,
+ Msg = {QName, _QPid, MsgId, Redelivered, _Message},
+ State = #ch{cfg = #conf{channel = ChannelNum,
+ trace_state = TraceState,
+ user = #user{username = Username},
+ conn_name = ConnName
+ },
+ unacked_message_q = UAMQ,
+ next_tag = DeliveryTag
+ }) ->
+ ?INCR_STATS(queue_stats, QName, 1, case {Type, AckRequired} of
+ {get, true} -> get;
+ {get, false} -> get_no_ack;
+ {deliver, true} -> deliver;
+ {deliver, false} -> deliver_no_ack
+ end, State),
+ case Redelivered of
+ true -> ?INCR_STATS(queue_stats, QName, 1, redeliver, State);
+ false -> ok
+ end,
+ DeliveredAt = os:system_time(millisecond),
+ rabbit_trace:tap_out(Msg, ConnName, ChannelNum, Username, TraceState),
+ UAMQ1 = case AckRequired of
+ true ->
+ ?QUEUE:in(#pending_ack{delivery_tag = DeliveryTag,
+ tag = Tag,
+ delivered_at = DeliveredAt,
+ queue = QName,
+ msg_id = MsgId}, UAMQ);
+ false ->
+ UAMQ
+ end,
+ State#ch{unacked_message_q = UAMQ1, next_tag = DeliveryTag + 1}.
+
+%% NB: returns acks in youngest-first order
+collect_acks(Q, 0, true) ->
+ {lists:reverse(?QUEUE:to_list(Q)), ?QUEUE:new()};
+collect_acks(Q, DeliveryTag, Multiple) ->
+ collect_acks([], [], Q, DeliveryTag, Multiple).
+
+collect_acks(ToAcc, PrefixAcc, Q, DeliveryTag, Multiple) ->
+ case ?QUEUE:out(Q) of
+ {{value, UnackedMsg = #pending_ack{delivery_tag = CurrentDeliveryTag}},
+ QTail} ->
+ if CurrentDeliveryTag == DeliveryTag ->
+ {[UnackedMsg | ToAcc],
+ case PrefixAcc of
+ [] -> QTail;
+ _ -> ?QUEUE:join(
+ ?QUEUE:from_list(lists:reverse(PrefixAcc)),
+ QTail)
+ end};
+ Multiple ->
+ collect_acks([UnackedMsg | ToAcc], PrefixAcc,
+ QTail, DeliveryTag, Multiple);
+ true ->
+ collect_acks(ToAcc, [UnackedMsg | PrefixAcc],
+ QTail, DeliveryTag, Multiple)
+ end;
+ {empty, _} ->
+ precondition_failed("unknown delivery tag ~w", [DeliveryTag])
+ end.
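+
+%% For illustration: with pending acks for delivery tags [1, 2, 3, 4]
+%% (oldest first) in the unacked queue, a hypothetical trace of the
+%% function above looks like this (delivery tags shown in place of the
+%% #pending_ack{} records they identify):
+%%
+%%   collect_acks(UAMQ, 3, true)  %% multiple ack up to tag 3
+%%     => {[3, 2, 1] (youngest-first), remaining queue [4]}
+%%   collect_acks(UAMQ, 3, false) %% single ack of tag 3
+%%     => {[3], remaining queue [1, 2, 4]}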
+
+%% NB: Acked is in youngest-first order
+ack(Acked, State = #ch{queue_states = QueueStates0}) ->
+ {QueueStates, Actions} =
+ foreach_per_queue(
+ fun ({QRef, CTag}, MsgIds, {Acc0, ActionsAcc0}) ->
+ case rabbit_queue_type:settle(QRef, complete, CTag,
+ MsgIds, Acc0) of
+ {ok, Acc, ActionsAcc} ->
+ incr_queue_stats(QRef, MsgIds, State),
+ {Acc, ActionsAcc0 ++ ActionsAcc};
+ {protocol_error, ErrorType, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(ErrorType, Reason, ReasonArgs)
+ end
+ end, Acked, {QueueStates0, []}),
+ ok = notify_limiter(State#ch.limiter, Acked),
+ {State#ch{queue_states = QueueStates}, Actions}.
+
+incr_queue_stats(QName, MsgIds, State) ->
+ Count = length(MsgIds),
+ ?INCR_STATS(queue_stats, QName, Count, ack, State).
+
+%% {Msgs, Acks}
+%%
+%% Msgs is a queue.
+%%
+%% Acks looks something like this:
+%% [{false,[5,4]},{true,[3]},{ack,[2,1]}, ...]
+%%
+%% Each element is a pair consisting of a tag and a list of
+%% ack'ed/reject'ed msg ids. The tag is one of 'ack' (to ack), 'true'
+%% (reject with requeue), 'false' (reject without requeue). The msg ids,
+%% as well as the list overall, are in "most-recent (generally youngest)
+%% ack first" order.
+new_tx() -> {?QUEUE:new(), []}.
+
+notify_queues(State = #ch{cfg = #conf{state = closing}}) ->
+ {ok, State};
+notify_queues(State = #ch{consumer_mapping = Consumers,
+ cfg = Cfg}) ->
+ QPids = classic_consumer_queue_pids(Consumers),
+ Timeout = get_operation_timeout(),
+ {rabbit_amqqueue:notify_down_all(QPids, self(), Timeout),
+ State#ch{cfg = Cfg#conf{state = closing}}}.
+
+foreach_per_queue(_F, [], Acc) ->
+ Acc;
+foreach_per_queue(F, [#pending_ack{tag = CTag,
+ queue = QName,
+ msg_id = MsgId}], Acc) ->
+ %% quorum queue, needs the consumer tag
+ F({QName, CTag}, [MsgId], Acc);
+foreach_per_queue(F, UAL, Acc) ->
+ T = lists:foldl(fun (#pending_ack{tag = CTag,
+ queue = QName,
+ msg_id = MsgId}, T) ->
+ rabbit_misc:gb_trees_cons({QName, CTag}, MsgId, T)
+ end, gb_trees:empty(), UAL),
+ rabbit_misc:gb_trees_fold(fun (Key, Val, Acc0) -> F(Key, Val, Acc0) end, Acc, T).
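+
+%% For illustration: given pending acks such as (hypothetical values)
+%%
+%%   [#pending_ack{queue = Q1, tag = <<"c1">>, msg_id = 1},
+%%    #pending_ack{queue = Q2, tag = <<"c2">>, msg_id = 2},
+%%    #pending_ack{queue = Q1, tag = <<"c1">>, msg_id = 3}]
+%%
+%% the fold above groups message ids per {QueueName, ConsumerTag} pair,
+%% so F is invoked once with {Q1, <<"c1">>} and [3, 1] and once with
+%% {Q2, <<"c2">>} and [2].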
+
+%% hack to patch up missing queue type behaviour for classic queue
+classic_consumer_queue_pids(Consumers) ->
+ lists:usort([amqqueue:get_pid(Q)
+ || {Q, _CParams} <- maps:values(Consumers),
+ amqqueue:get_type(Q) == rabbit_classic_queue]).
+
+%% Tell the limiter about the number of acks that have been received
+%% for messages delivered to subscribed consumers, but not acks for
+%% messages sent in response to a basic.get (identified by their
+%% consumer tag being an integer, the same as the delivery tag, as
+%% required by quorum queues).
+notify_limiter(Limiter, Acked) ->
+ %% optimisation: avoid the potentially expensive 'foldl' in the
+ %% common case.
+ case rabbit_limiter:is_active(Limiter) of
+ false -> ok;
+ true -> case lists:foldl(fun (#pending_ack{tag = CTag}, Acc) when is_integer(CTag) ->
+ %% Acked holds #pending_ack{} records
+ %% (see collect_acks/3).
+ %% Quorum queues use integer CTags
+ %% classic queues use binaries
+ %% Quorum queues do not interact
+ %% with limiters
+ Acc;
+ (#pending_ack{}, Acc) -> Acc + 1
+ end, 0, Acked) of
+ 0 -> ok;
+ Count -> rabbit_limiter:ack(Limiter, Count)
+ end
+ end.
+
+deliver_to_queues({#delivery{message = #basic_message{exchange_name = XName},
+ confirm = false,
+ mandatory = false},
+ _RoutedToQs = []}, State) -> %% optimisation
+ ?INCR_STATS(exchange_stats, XName, 1, publish, State),
+ ?INCR_STATS(exchange_stats, XName, 1, drop_unroutable, State),
+ State;
+deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{
+ exchange_name = XName},
+ mandatory = Mandatory,
+ confirm = Confirm,
+ msg_seq_no = MsgSeqNo},
+ DelQNames}, State0 = #ch{queue_states = QueueStates0}) ->
+ Qs = rabbit_amqqueue:lookup(DelQNames),
+ AllQueueNames = lists:foldl(fun (Q, Acc) ->
+ QRef = amqqueue:get_name(Q),
+ [QRef | Acc]
+ end, [], Qs),
+ {ok, QueueStates, Actions} =
+ rabbit_queue_type:deliver(Qs, Delivery, QueueStates0),
+ %% NB: the order here is important since basic.returns must be
+ %% sent before confirms.
+ ok = process_routing_mandatory(Mandatory, Qs, Message, State0),
+ State1 = process_routing_confirm(Confirm, AllQueueNames,
+ MsgSeqNo, XName, State0),
+ %% Actions must be processed after registering confirms as actions may
+ %% contain rejections of publishes
+ State = handle_queue_actions(Actions,
+ State1#ch{queue_states = QueueStates}),
+ case rabbit_event:stats_level(State, #ch.stats_timer) of
+ fine ->
+ ?INCR_STATS(exchange_stats, XName, 1, publish),
+ [?INCR_STATS(queue_exchange_stats,
+ {amqqueue:get_name(Q), XName}, 1, publish)
+ || Q <- Qs];
+ _ ->
+ ok
+ end,
+ State.
+
+process_routing_mandatory(_Mandatory = true,
+ _RoutedToQs = [],
+ Msg, State) ->
+ ok = basic_return(Msg, State, no_route),
+ ok;
+process_routing_mandatory(_Mandatory = false,
+ _RoutedToQs = [],
+ #basic_message{exchange_name = ExchangeName}, State) ->
+ ?INCR_STATS(exchange_stats, ExchangeName, 1, drop_unroutable, State),
+ ok;
+process_routing_mandatory(_, _, _, _) ->
+ ok.
+
+process_routing_confirm(false, _, _, _, State) ->
+ State;
+process_routing_confirm(true, [], MsgSeqNo, XName, State) ->
+ record_confirms([{MsgSeqNo, XName}], State);
+process_routing_confirm(true, QRefs, MsgSeqNo, XName, State) ->
+ State#ch{unconfirmed =
+ rabbit_confirms:insert(MsgSeqNo, QRefs, XName, State#ch.unconfirmed)}.
+
+confirm(MsgSeqNos, QRef, State = #ch{unconfirmed = UC}) ->
+ %% NOTE: if queue name does not exist here it's likely that the ref also
+ %% does not exist in unconfirmed messages.
+ %% Neither does the 'ignore' atom, so it's a reasonable fallback.
+ {ConfirmMXs, UC1} = rabbit_confirms:confirm(MsgSeqNos, QRef, UC),
+ %% NB: don't call noreply/1 since we don't want to send confirms.
+ record_confirms(ConfirmMXs, State#ch{unconfirmed = UC1}).
+
+send_confirms_and_nacks(State = #ch{tx = none, confirmed = [], rejected = []}) ->
+ State;
+send_confirms_and_nacks(State = #ch{tx = none, confirmed = C, rejected = R}) ->
+ case rabbit_node_monitor:pause_partition_guard() of
+ ok ->
+ Confirms = lists:append(C),
+ Rejects = lists:append(R),
+ ConfirmMsgSeqNos =
+ lists:foldl(
+ fun ({MsgSeqNo, XName}, MSNs) ->
+ ?INCR_STATS(exchange_stats, XName, 1, confirm, State),
+ [MsgSeqNo | MSNs]
+ end, [], Confirms),
+ RejectMsgSeqNos = [MsgSeqNo || {MsgSeqNo, _} <- Rejects],
+
+ State1 = send_confirms(ConfirmMsgSeqNos,
+ RejectMsgSeqNos,
+ State#ch{confirmed = []}),
+ %% TODO: msg seq nos, same as for confirms. Need to implement
+ %% nack rates first.
+ send_nacks(RejectMsgSeqNos,
+ ConfirmMsgSeqNos,
+ State1#ch{rejected = []});
+ pausing -> State
+ end;
+send_confirms_and_nacks(State) ->
+ case rabbit_node_monitor:pause_partition_guard() of
+ ok -> maybe_complete_tx(State);
+ pausing -> State
+ end.
+
+send_nacks([], _, State) ->
+ State;
+send_nacks(_Rs, _, State = #ch{cfg = #conf{state = closing}}) -> %% optimisation
+ State;
+send_nacks(Rs, Cs, State) ->
+ coalesce_and_send(Rs, Cs,
+ fun(MsgSeqNo, Multiple) ->
+ #'basic.nack'{delivery_tag = MsgSeqNo,
+ multiple = Multiple}
+ end, State).
+
+send_confirms([], _, State) ->
+ State;
+send_confirms(_Cs, _, State = #ch{cfg = #conf{state = closing}}) -> %% optimisation
+ State;
+send_confirms([MsgSeqNo], _, State) ->
+ ok = send(#'basic.ack'{delivery_tag = MsgSeqNo}, State),
+ State;
+send_confirms(Cs, Rs, State) ->
+ coalesce_and_send(Cs, Rs,
+ fun(MsgSeqNo, Multiple) ->
+ #'basic.ack'{delivery_tag = MsgSeqNo,
+ multiple = Multiple}
+ end, State).
+
+coalesce_and_send(MsgSeqNos, NegativeMsgSeqNos, MkMsgFun, State = #ch{unconfirmed = UC}) ->
+ SMsgSeqNos = lists:usort(MsgSeqNos),
+ UnconfirmedCutoff = case rabbit_confirms:is_empty(UC) of
+ true -> lists:last(SMsgSeqNos) + 1;
+ false -> rabbit_confirms:smallest(UC)
+ end,
+ Cutoff = lists:min([UnconfirmedCutoff | NegativeMsgSeqNos]),
+ {Ms, Ss} = lists:splitwith(fun(X) -> X < Cutoff end, SMsgSeqNos),
+ case Ms of
+ [] -> ok;
+ _ -> ok = send(MkMsgFun(lists:last(Ms), true), State)
+ end,
+ [ok = send(MkMsgFun(SeqNo, false), State) || SeqNo <- Ss],
+ State.
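+
+%% For illustration, assuming confirms for sequence numbers [1, 2, 3, 5],
+%% no negative acknowledgements, and 4 still unconfirmed (so
+%% rabbit_confirms:smallest/1 returns 4): the cutoff is 4, the split
+%% yields {[1, 2, 3], [5]}, and the channel sends
+%%
+%%   #'basic.ack'{delivery_tag = 3, multiple = true}
+%%   #'basic.ack'{delivery_tag = 5, multiple = false}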
+
+ack_cons(Tag, Acked, [{Tag, Acks} | L]) -> [{Tag, Acked ++ Acks} | L];
+ack_cons(Tag, Acked, Acks) -> [{Tag, Acked} | Acks].
+
+ack_len(Acks) -> lists:sum([length(L) || {ack, L} <- Acks]).
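+
+%% For illustration: ack_cons/3 coalesces consecutive operations of the
+%% same kind, e.g. (hypothetical values)
+%%
+%%   ack_cons(true, [7, 6], [{true, [5, 4]}, {ack, [3]}])
+%%     => [{true, [7, 6, 5, 4]}, {ack, [3]}]
+%%   ack_cons(ack, [8], [{true, [7, 6, 5, 4]}, {ack, [3]}])
+%%     => [{ack, [8]}, {true, [7, 6, 5, 4]}, {ack, [3]}]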
+
+maybe_complete_tx(State = #ch{tx = {_, _}}) ->
+ State;
+maybe_complete_tx(State = #ch{unconfirmed = UC}) ->
+ case rabbit_confirms:is_empty(UC) of
+ false -> State;
+ true -> complete_tx(State#ch{confirmed = []})
+ end.
+
+complete_tx(State = #ch{tx = committing}) ->
+ ok = send(#'tx.commit_ok'{}, State),
+ State#ch{tx = new_tx()};
+complete_tx(State = #ch{tx = failed}) ->
+ {noreply, State1} = handle_exception(
+ rabbit_misc:amqp_error(
+ precondition_failed, "partial tx completion", [],
+ 'tx.commit'),
+ State),
+ State1#ch{tx = new_tx()}.
+
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+infos(Items, Deadline, State) ->
+ [begin
+ Now = now_millis(),
+ if
+ Now > Deadline ->
+ throw(timeout);
+ true ->
+ {Item, i(Item, State)}
+ end
+ end || Item <- Items].
+
+i(pid, _) -> self();
+i(connection, #ch{cfg = #conf{conn_pid = ConnPid}}) -> ConnPid;
+i(number, #ch{cfg = #conf{channel = Channel}}) -> Channel;
+i(user, #ch{cfg = #conf{user = User}}) -> User#user.username;
+i(user_who_performed_action, Ch) -> i(user, Ch);
+i(vhost, #ch{cfg = #conf{virtual_host = VHost}}) -> VHost;
+i(transactional, #ch{tx = Tx}) -> Tx =/= none;
+i(confirm, #ch{confirm_enabled = CE}) -> CE;
+i(name, State) -> name(State);
+i(consumer_count, #ch{consumer_mapping = CM}) -> maps:size(CM);
+i(messages_unconfirmed, #ch{unconfirmed = UC}) -> rabbit_confirms:size(UC);
+i(messages_unacknowledged, #ch{unacked_message_q = UAMQ}) -> ?QUEUE:len(UAMQ);
+i(messages_uncommitted, #ch{tx = {Msgs, _Acks}}) -> ?QUEUE:len(Msgs);
+i(messages_uncommitted, #ch{}) -> 0;
+i(acks_uncommitted, #ch{tx = {_Msgs, Acks}}) -> ack_len(Acks);
+i(acks_uncommitted, #ch{}) -> 0;
+i(pending_raft_commands, #ch{queue_states = QS}) ->
+ pending_raft_commands(QS);
+i(state, #ch{cfg = #conf{state = running}}) -> credit_flow:state();
+i(state, #ch{cfg = #conf{state = State}}) -> State;
+i(prefetch_count, #ch{cfg = #conf{consumer_prefetch = C}}) -> C;
+i(global_prefetch_count, #ch{limiter = Limiter}) ->
+ rabbit_limiter:get_prefetch_limit(Limiter);
+i(interceptors, #ch{interceptor_state = IState}) ->
+ IState;
+i(garbage_collection, _State) ->
+ rabbit_misc:get_gc_info(self());
+i(reductions, _State) ->
+ {reductions, Reductions} = erlang:process_info(self(), reductions),
+ Reductions;
+i(Item, _) ->
+ throw({bad_argument, Item}).
+
+pending_raft_commands(QStates) ->
+ Fun = fun(_, V, Acc) ->
+ case rabbit_queue_type:state_info(V) of
+ #{pending_raft_commands := P} ->
+ Acc + P;
+ _ ->
+ Acc
+ end
+ end,
+ rabbit_queue_type:fold_state(Fun, 0, QStates).
+
+name(#ch{cfg = #conf{conn_name = ConnName, channel = Channel}}) ->
+ list_to_binary(rabbit_misc:format("~s (~p)", [ConnName, Channel])).
+
+emit_stats(State) -> emit_stats(State, []).
+
+emit_stats(State, Extra) ->
+ [{reductions, Red} | Coarse0] = infos(?STATISTICS_KEYS, State),
+ %% First metric must be `idle_since` (if available), as expected by
+ %% `rabbit_mgmt_format:format_channel_stats`. This is a performance
+ %% optimisation that avoids traversing the whole list when only
+ %% one element has to be formatted.
+ rabbit_core_metrics:channel_stats(self(), Extra ++ Coarse0),
+ rabbit_core_metrics:channel_stats(reductions, self(), Red).
+
+erase_queue_stats(QName) ->
+ rabbit_core_metrics:channel_queue_down({self(), QName}),
+ erase({queue_stats, QName}),
+ [begin
+ rabbit_core_metrics:channel_queue_exchange_down({self(), QX}),
+ erase({queue_exchange_stats, QX})
+ end || {{queue_exchange_stats, QX = {QName0, _}}, _} <- get(),
+ QName0 =:= QName].
+
+get_vhost(#ch{cfg = #conf{virtual_host = VHost}}) -> VHost.
+
+get_user(#ch{cfg = #conf{user = User}}) -> User.
+
+delete_stats({queue_stats, QName}) ->
+ rabbit_core_metrics:channel_queue_down({self(), QName});
+delete_stats({exchange_stats, XName}) ->
+ rabbit_core_metrics:channel_exchange_down({self(), XName});
+delete_stats({queue_exchange_stats, QX}) ->
+ rabbit_core_metrics:channel_queue_exchange_down({self(), QX});
+delete_stats(_) ->
+ ok.
+
+put_operation_timeout() ->
+ put(channel_operation_timeout, ?CHANNEL_OPERATION_TIMEOUT).
+
+get_operation_timeout() ->
+ get(channel_operation_timeout).
+
+%% Refactored and exported to allow direct calls from the HTTP API,
+%% avoiding the use of AMQP 0-9-1 from the management plugin.
+
+handle_method(#'exchange.bind'{destination = DestinationNameBin,
+ source = SourceNameBin,
+ routing_key = RoutingKey,
+ arguments = Arguments},
+ ConnPid, AuthzContext, _CollectorId, VHostPath, User) ->
+ binding_action(fun rabbit_binding:add/3,
+ SourceNameBin, exchange, DestinationNameBin,
+ RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User);
+handle_method(#'exchange.unbind'{destination = DestinationNameBin,
+ source = SourceNameBin,
+ routing_key = RoutingKey,
+ arguments = Arguments},
+ ConnPid, AuthzContext, _CollectorId, VHostPath, User) ->
+ binding_action(fun rabbit_binding:remove/3,
+ SourceNameBin, exchange, DestinationNameBin,
+ RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User);
+handle_method(#'queue.unbind'{queue = QueueNameBin,
+ exchange = ExchangeNameBin,
+ routing_key = RoutingKey,
+ arguments = Arguments},
+ ConnPid, AuthzContext, _CollectorId, VHostPath, User) ->
+ binding_action(fun rabbit_binding:remove/3,
+ ExchangeNameBin, queue, QueueNameBin,
+ RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User);
+handle_method(#'queue.bind'{queue = QueueNameBin,
+ exchange = ExchangeNameBin,
+ routing_key = RoutingKey,
+ arguments = Arguments},
+ ConnPid, AuthzContext, _CollectorId, VHostPath, User) ->
+ binding_action(fun rabbit_binding:add/3,
+ ExchangeNameBin, queue, QueueNameBin,
+ RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User);
+%% Note that all declares of these queues are effectively passive. If
+%% such a queue exists, it by definition has one consumer.
+handle_method(#'queue.declare'{queue = <<"amq.rabbitmq.reply-to",
+ _/binary>> = QueueNameBin},
+ _ConnPid, _AuthzContext, _CollectorPid, VHost, _User) ->
+ StrippedQueueNameBin = strip_cr_lf(QueueNameBin),
+ QueueName = rabbit_misc:r(VHost, queue, StrippedQueueNameBin),
+ case declare_fast_reply_to(StrippedQueueNameBin) of
+ exists -> {ok, QueueName, 0, 1};
+ not_found -> rabbit_amqqueue:not_found(QueueName)
+ end;
+handle_method(#'queue.declare'{queue = QueueNameBin,
+ passive = false,
+ durable = DurableDeclare,
+ exclusive = ExclusiveDeclare,
+ auto_delete = AutoDelete,
+ nowait = NoWait,
+ arguments = Args} = Declare,
+ ConnPid, AuthzContext, CollectorPid, VHostPath,
+ #user{username = Username} = User) ->
+ Owner = case ExclusiveDeclare of
+ true -> ConnPid;
+ false -> none
+ end,
+ StrippedQueueNameBin = strip_cr_lf(QueueNameBin),
+ Durable = DurableDeclare andalso not ExclusiveDeclare,
+ ActualNameBin = case StrippedQueueNameBin of
+ <<>> ->
+ case rabbit_amqqueue:is_server_named_allowed(Args) of
+ true ->
+ rabbit_guid:binary(rabbit_guid:gen_secure(), "amq.gen");
+ false ->
+ rabbit_misc:protocol_error(
+ precondition_failed,
+ "Cannot declare a server-named queue for type ~p",
+ [rabbit_amqqueue:get_queue_type(Args)])
+ end;
+ Other -> check_name('queue', Other)
+ end,
+ QueueName = rabbit_misc:r(VHostPath, queue, ActualNameBin),
+ check_configure_permitted(QueueName, User, AuthzContext),
+ rabbit_core_metrics:queue_declared(QueueName),
+ case rabbit_amqqueue:with(
+ QueueName,
+ fun (Q) -> ok = rabbit_amqqueue:assert_equivalence(
+ Q, Durable, AutoDelete, Args, Owner),
+ maybe_stat(NoWait, Q)
+ end) of
+ {ok, MessageCount, ConsumerCount} ->
+ {ok, QueueName, MessageCount, ConsumerCount};
+ {error, not_found} ->
+ %% enforce the limit for newly declared queues only
+ check_vhost_queue_limit(QueueName, VHostPath),
+ DlxKey = <<"x-dead-letter-exchange">>,
+ case rabbit_misc:r_arg(VHostPath, exchange, Args, DlxKey) of
+ undefined ->
+ ok;
+ {error, {invalid_type, Type}} ->
+ precondition_failed(
+ "invalid type '~s' for arg '~s' in ~s",
+ [Type, DlxKey, rabbit_misc:rs(QueueName)]);
+ DLX ->
+ check_read_permitted(QueueName, User, AuthzContext),
+ check_write_permitted(DLX, User, AuthzContext),
+ ok
+ end,
+ case rabbit_amqqueue:declare(QueueName, Durable, AutoDelete,
+ Args, Owner, Username) of
+ {new, Q} when ?is_amqqueue(Q) ->
+ %% We need to notify the reader within the channel
+ %% process so that we can be sure there are no
+ %% outstanding exclusive queues being declared as
+ %% the connection shuts down.
+ QPid = amqqueue:get_pid(Q),
+ ok = case {Owner, CollectorPid} of
+ {none, _} -> ok;
+ {_, none} -> ok; %% Supports call from mgmt API
+ _ -> rabbit_queue_collector:register(
+ CollectorPid, QPid)
+ end,
+ rabbit_core_metrics:queue_created(QueueName),
+ {ok, QueueName, 0, 0};
+ {existing, _Q} ->
+ %% must have been created between the stat and the
+ %% declare. Loop around again.
+ handle_method(Declare, ConnPid, AuthzContext, CollectorPid, VHostPath,
+ User);
+ {absent, Q, Reason} ->
+ rabbit_amqqueue:absent(Q, Reason);
+ {owner_died, _Q} ->
+ %% Presumably our own days are numbered since the
+ %% connection has died. Pretend the queue exists though,
+ %% just so nothing fails.
+ {ok, QueueName, 0, 0};
+ {protocol_error, ErrorType, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(ErrorType, Reason, ReasonArgs)
+ end;
+ {error, {absent, Q, Reason}} ->
+ rabbit_amqqueue:absent(Q, Reason)
+ end;
+handle_method(#'queue.declare'{queue = QueueNameBin,
+ nowait = NoWait,
+ passive = true},
+ ConnPid, _AuthzContext, _CollectorPid, VHostPath, _User) ->
+ StrippedQueueNameBin = strip_cr_lf(QueueNameBin),
+ QueueName = rabbit_misc:r(VHostPath, queue, StrippedQueueNameBin),
+ Fun = fun (Q0) ->
+ QStat = maybe_stat(NoWait, Q0),
+ {QStat, Q0}
+ end,
+ %% Note: no need to check if Q is an #amqqueue, with_or_die does it
+ {{ok, MessageCount, ConsumerCount}, Q} = rabbit_amqqueue:with_or_die(QueueName, Fun),
+ ok = rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
+ {ok, QueueName, MessageCount, ConsumerCount};
+handle_method(#'queue.delete'{queue = QueueNameBin,
+ if_unused = IfUnused,
+ if_empty = IfEmpty},
+ ConnPid, AuthzContext, _CollectorPid, VHostPath,
+ User = #user{username = Username}) ->
+ StrippedQueueNameBin = strip_cr_lf(QueueNameBin),
+ QueueName = qbin_to_resource(StrippedQueueNameBin, VHostPath),
+
+ check_configure_permitted(QueueName, User, AuthzContext),
+ case rabbit_amqqueue:with(
+ QueueName,
+ fun (Q) ->
+ rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
+ rabbit_queue_type:delete(Q, IfUnused, IfEmpty, Username)
+ end,
+ fun (not_found) ->
+ {ok, 0};
+ ({absent, Q, crashed}) ->
+ _ = rabbit_classic_queue:delete_crashed(Q, Username),
+ {ok, 0};
+ ({absent, Q, stopped}) ->
+ _ = rabbit_classic_queue:delete_crashed(Q, Username),
+ {ok, 0};
+ ({absent, Q, Reason}) ->
+ rabbit_amqqueue:absent(Q, Reason)
+ end) of
+ {error, in_use} ->
+ precondition_failed("~s in use", [rabbit_misc:rs(QueueName)]);
+ {error, not_empty} ->
+ precondition_failed("~s not empty", [rabbit_misc:rs(QueueName)]);
+ {ok, Count} ->
+ {ok, Count};
+ {protocol_error, Type, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(Type, Reason, ReasonArgs)
+ end;
+handle_method(#'exchange.delete'{exchange = ExchangeNameBin,
+ if_unused = IfUnused},
+ _ConnPid, AuthzContext, _CollectorPid, VHostPath,
+ User = #user{username = Username}) ->
+ StrippedExchangeNameBin = strip_cr_lf(ExchangeNameBin),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, StrippedExchangeNameBin),
+ check_not_default_exchange(ExchangeName),
+ check_exchange_deletion(ExchangeName),
+ check_configure_permitted(ExchangeName, User, AuthzContext),
+ case rabbit_exchange:delete(ExchangeName, IfUnused, Username) of
+ {error, not_found} ->
+ ok;
+ {error, in_use} ->
+ precondition_failed("~s in use", [rabbit_misc:rs(ExchangeName)]);
+ ok ->
+ ok
+ end;
+handle_method(#'queue.purge'{queue = QueueNameBin},
+ ConnPid, AuthzContext, _CollectorPid, VHostPath, User) ->
+ QueueName = qbin_to_resource(QueueNameBin, VHostPath),
+ check_read_permitted(QueueName, User, AuthzContext),
+ rabbit_amqqueue:with_exclusive_access_or_die(
+ QueueName, ConnPid,
+ fun (Q) ->
+ case rabbit_queue_type:purge(Q) of
+ {ok, _} = Res ->
+ Res;
+ {error, not_supported} ->
+ rabbit_misc:protocol_error(
+ not_implemented,
+ "queue.purge not supported by stream queues ~s",
+ [rabbit_misc:rs(amqqueue:get_name(Q))])
+ end
+ end);
+handle_method(#'exchange.declare'{exchange = ExchangeNameBin,
+ type = TypeNameBin,
+ passive = false,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ arguments = Args},
+ _ConnPid, AuthzContext, _CollectorPid, VHostPath,
+ #user{username = Username} = User) ->
+ CheckedType = rabbit_exchange:check_type(TypeNameBin),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, strip_cr_lf(ExchangeNameBin)),
+ check_not_default_exchange(ExchangeName),
+ check_configure_permitted(ExchangeName, User, AuthzContext),
+ X = case rabbit_exchange:lookup(ExchangeName) of
+ {ok, FoundX} -> FoundX;
+ {error, not_found} ->
+ check_name('exchange', strip_cr_lf(ExchangeNameBin)),
+ AeKey = <<"alternate-exchange">>,
+ case rabbit_misc:r_arg(VHostPath, exchange, Args, AeKey) of
+ undefined -> ok;
+ {error, {invalid_type, Type}} ->
+ precondition_failed(
+ "invalid type '~s' for arg '~s' in ~s",
+ [Type, AeKey, rabbit_misc:rs(ExchangeName)]);
+ AName -> check_read_permitted(ExchangeName, User, AuthzContext),
+ check_write_permitted(AName, User, AuthzContext),
+ ok
+ end,
+ rabbit_exchange:declare(ExchangeName,
+ CheckedType,
+ Durable,
+ AutoDelete,
+ Internal,
+ Args,
+ Username)
+ end,
+ ok = rabbit_exchange:assert_equivalence(X, CheckedType, Durable,
+ AutoDelete, Internal, Args);
+handle_method(#'exchange.declare'{exchange = ExchangeNameBin,
+ passive = true},
+ _ConnPid, _AuthzContext, _CollectorPid, VHostPath, _User) ->
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, strip_cr_lf(ExchangeNameBin)),
+ check_not_default_exchange(ExchangeName),
+ _ = rabbit_exchange:lookup_or_die(ExchangeName).
+
+handle_deliver(CTag, Ack, Msgs, State) when is_list(Msgs) ->
+ lists:foldl(fun(Msg, S) ->
+ handle_deliver0(CTag, Ack, Msg, S)
+ end, State, Msgs);
+handle_deliver(CTag, Ack, Msg, State) ->
+ %% backwards compatibility clause
+ handle_deliver0(CTag, Ack, Msg, State).
+
+handle_deliver0(ConsumerTag, AckRequired,
+ Msg = {QName, QPid, _MsgId, Redelivered,
+ #basic_message{exchange_name = ExchangeName,
+ routing_keys = [RoutingKey | _CcRoutes],
+ content = Content}},
+ State = #ch{cfg = #conf{writer_pid = WriterPid,
+ writer_gc_threshold = GCThreshold},
+ next_tag = DeliveryTag,
+ queue_states = Qs}) ->
+ Deliver = #'basic.deliver'{consumer_tag = ConsumerTag,
+ delivery_tag = DeliveryTag,
+ redelivered = Redelivered,
+ exchange = ExchangeName#resource.name,
+ routing_key = RoutingKey},
+ case rabbit_queue_type:module(QName, Qs) of
+ {ok, rabbit_classic_queue} ->
+ ok = rabbit_writer:send_command_and_notify(
+ WriterPid, QPid, self(), Deliver, Content);
+ _ ->
+ ok = rabbit_writer:send_command(WriterPid, Deliver, Content)
+ end,
+ case GCThreshold of
+ undefined -> ok;
+ _ -> rabbit_basic:maybe_gc_large_msg(Content, GCThreshold)
+ end,
+ record_sent(deliver, ConsumerTag, AckRequired, Msg, State).
+
+handle_basic_get(WriterPid, DeliveryTag, NoAck, MessageCount,
+ Msg = {_QName, _QPid, _MsgId, Redelivered,
+ #basic_message{exchange_name = ExchangeName,
+ routing_keys = [RoutingKey | _CcRoutes],
+ content = Content}}, State) ->
+ ok = rabbit_writer:send_command(
+ WriterPid,
+ #'basic.get_ok'{delivery_tag = DeliveryTag,
+ redelivered = Redelivered,
+ exchange = ExchangeName#resource.name,
+ routing_key = RoutingKey,
+ message_count = MessageCount},
+ Content),
+ {noreply, record_sent(get, DeliveryTag, not(NoAck), Msg, State)}.
+
+init_tick_timer(State = #ch{tick_timer = undefined}) ->
+ {ok, Interval} = application:get_env(rabbit, channel_tick_interval),
+ State#ch{tick_timer = erlang:send_after(Interval, self(), tick)};
+init_tick_timer(State) ->
+ State.
+
+reset_tick_timer(State) ->
+ State#ch{tick_timer = undefined}.
+
+maybe_cancel_tick_timer(#ch{tick_timer = undefined} = State) ->
+ State;
+maybe_cancel_tick_timer(#ch{tick_timer = TRef,
+ unacked_message_q = UMQ} = State) ->
+ case ?QUEUE:len(UMQ) of
+ 0 ->
+ %% we can only cancel the tick timer if the unacked messages
+ %% queue is empty.
+ _ = erlang:cancel_timer(TRef),
+ State#ch{tick_timer = undefined};
+ _ ->
+ %% let the timer continue
+ State
+ end.
+
+now_millis() ->
+ erlang:monotonic_time(millisecond).
+
+get_operation_timeout_and_deadline() ->
+ % NB: can't use get_operation_timeout because
+ % this code may not be running via the channel Pid
+ Timeout = ?CHANNEL_OPERATION_TIMEOUT,
+ Deadline = now_millis() + Timeout,
+ {Timeout, Deadline}.
+
+queue_fold(Fun, Init, Q) ->
+ case ?QUEUE:out(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> queue_fold(Fun, Fun(V, Init), Q1)
+ end.
+
+evaluate_consumer_timeout(State0 = #ch{cfg = #conf{channel = Channel,
+ consumer_timeout = Timeout},
+ unacked_message_q = UAMQ}) ->
+ Now = os:system_time(millisecond),
+ case ?QUEUE:peek(UAMQ) of
+ {value, #pending_ack{delivery_tag = ConsumerTag,
+ delivered_at = Time}}
+ when is_integer(Timeout)
+ andalso Time < Now - Timeout ->
+ rabbit_log_channel:warning("Consumer ~s on channel ~w has timed out "
+ "waiting on consumer acknowledgement. Timeout used: ~p ms",
+ [rabbit_data_coercion:to_binary(ConsumerTag),
+ Channel, Timeout]),
+ Ex = rabbit_misc:amqp_error(precondition_failed,
+ "consumer ack timed out on channel ~w",
+ [Channel], none),
+ handle_exception(Ex, State0);
+ _ ->
+ {noreply, State0}
+ end.
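+
+%% For illustration: assuming the timeout checked above is taken from
+%% the 'consumer_timeout' key of the rabbit application environment, it
+%% can be set via advanced.config (value in milliseconds; 30 minutes is
+%% a hypothetical choice):
+%%
+%%   [{rabbit, [{consumer_timeout, 1800000}]}].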
+
+handle_queue_actions(Actions, #ch{} = State0) ->
+ WriterPid = State0#ch.cfg#conf.writer_pid,
+ lists:foldl(
+ fun ({send_credit_reply, Avail}, S0) ->
+ ok = rabbit_writer:send_command(WriterPid,
+ #'basic.credit_ok'{available = Avail}),
+ S0;
+ ({send_drained, {CTag, Credit}}, S0) ->
+ ok = rabbit_writer:send_command(
+ WriterPid,
+ #'basic.credit_drained'{consumer_tag = CTag,
+ credit_drained = Credit}),
+ S0;
+ ({settled, QRef, MsgSeqNos}, S0) ->
+ confirm(MsgSeqNos, QRef, S0);
+ ({rejected, _QRef, MsgSeqNos}, S0) ->
+ {U, Rej} =
+ lists:foldr(
+ fun(SeqNo, {U1, Acc}) ->
+ case rabbit_confirms:reject(SeqNo, U1) of
+ {ok, MX, U2} ->
+ {U2, [MX | Acc]};
+ {error, not_found} ->
+ {U1, Acc}
+ end
+ end, {S0#ch.unconfirmed, []}, MsgSeqNos),
+ S = S0#ch{unconfirmed = U},
+ record_rejects(Rej, S);
+ ({deliver, CTag, AckRequired, Msgs}, S0) ->
+ handle_deliver(CTag, AckRequired, Msgs, S0);
+ ({queue_down, QRef}, S0) ->
+ handle_consuming_queue_down_or_eol(QRef, S0)
+ end, State0, Actions).
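+
+%% For illustration: a queue type implementation might hand back an
+%% action list such as (hypothetical values)
+%%
+%%   [{settled, QName, [1, 2, 3]},
+%%    {deliver, <<"ctag-1">>, true, Msgs},
+%%    {queue_down, QName}]
+%%
+%% which the fold above turns into confirms, deliveries and consuming
+%% queue clean-up respectively.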
+
+find_queue_name_from_pid(Pid, QStates) when is_pid(Pid) ->
+ Fun = fun(K, _V, undefined) ->
+ case rabbit_amqqueue:lookup(K) of
+ {error, not_found} ->
+ undefined;
+ {ok, Q} ->
+ Pids = get_queue_pids(Q),
+ case lists:member(Pid, Pids) of
+ true ->
+ K;
+ false ->
+ undefined
+ end
+ end;
+ (_K, _V, Acc) ->
+ Acc
+ end,
+ rabbit_queue_type:fold_state(Fun, undefined, QStates).
+
+get_queue_pids(Q) when ?amqqueue_is_quorum(Q) ->
+ [amqqueue:get_leader(Q)];
+get_queue_pids(Q) ->
+ [amqqueue:get_pid(Q) | amqqueue:get_slave_pids(Q)].
+
+find_queue_name_from_quorum_name(Name, QStates) ->
+ Fun = fun(K, _V, undefined) ->
+ {ok, Q} = rabbit_amqqueue:lookup(K),
+ case amqqueue:get_pid(Q) of
+ {Name, _} ->
+ amqqueue:get_name(Q);
+ _ ->
+ undefined
+ end;
+ (_K, _V, Acc) ->
+ Acc
+ end,
+ rabbit_queue_type:fold_state(Fun, undefined, QStates).
diff --git a/deps/rabbit/src/rabbit_channel_interceptor.erl b/deps/rabbit/src/rabbit_channel_interceptor.erl
new file mode 100644
index 0000000000..c40b437f10
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel_interceptor.erl
@@ -0,0 +1,104 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_interceptor).
+
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([init/1, intercept_in/3]).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-type(method_name() :: rabbit_framing:amqp_method_name()).
+-type(original_method() :: rabbit_framing:amqp_method_record()).
+-type(processed_method() :: rabbit_framing:amqp_method_record()).
+-type(original_content() :: rabbit_types:maybe(rabbit_types:content())).
+-type(processed_content() :: rabbit_types:maybe(rabbit_types:content())).
+-type(interceptor_state() :: term()).
+
+-callback description() -> [proplists:property()].
+%% Derive some initial state from the channel. This will be passed back
+%% as the third argument of intercept/3.
+-callback init(rabbit_channel:channel()) -> interceptor_state().
+-callback intercept(original_method(), original_content(),
+ interceptor_state()) ->
+ {processed_method(), processed_content()} |
+ rabbit_misc:channel_or_connection_exit().
+-callback applies_to() -> list(method_name()).
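+
+%% For illustration: a minimal interceptor implementing this behaviour
+%% could look like the sketch below (hypothetical module, assuming it is
+%% registered with rabbit_registry under the channel_interceptor type):
+%%
+%%   -module(my_noop_interceptor).
+%%   -behaviour(rabbit_channel_interceptor).
+%%   -export([description/0, init/1, intercept/3, applies_to/0]).
+%%
+%%   description() -> [{description, <<"does nothing">>}].
+%%   init(_Ch) -> undefined.
+%%   intercept(#'basic.publish'{} = Method, Content, _State) ->
+%%       {Method, Content}.
+%%   applies_to() -> ['basic.publish'].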
+
+added_to_rabbit_registry(_Type, _ModuleName) ->
+ rabbit_channel:refresh_interceptors().
+removed_from_rabbit_registry(_Type) ->
+ rabbit_channel:refresh_interceptors().
+
+init(Ch) ->
+ Mods = [M || {_, M} <- rabbit_registry:lookup_all(channel_interceptor)],
+ check_no_overlap(Mods),
+ [{Mod, Mod:init(Ch)} || Mod <- Mods].
+
+check_no_overlap(Mods) ->
+ check_no_overlap1([sets:from_list(Mod:applies_to()) || Mod <- Mods]).
+
+%% Check no non-empty pairwise intersection in a list of sets
+check_no_overlap1(Sets) ->
+ lists:foldl(fun(Set, Union) ->
+ Is = sets:intersection(Set, Union),
+ case sets:size(Is) of
+ 0 -> ok;
+ _ ->
+ internal_error("Interceptor: more than one "
+ "module handles ~p~n", [Is])
+ end,
+ sets:union(Set, Union)
+ end,
+ sets:new(),
+ Sets),
+ ok.
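+
+%% For illustration: two interceptors that both declare 'basic.publish'
+%% in applies_to/0 overlap, so (hypothetical sets)
+%%
+%%   check_no_overlap1([sets:from_list(['basic.publish']),
+%%                      sets:from_list(['basic.publish', 'basic.get'])])
+%%
+%% raises an internal_error, whereas disjoint applies_to() lists pass.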
+
+intercept_in(M, C, Mods) ->
+ lists:foldl(fun({Mod, ModState}, {M1, C1}) ->
+ call_module(Mod, ModState, M1, C1)
+ end,
+ {M, C},
+ Mods).
+
+call_module(Mod, St, M, C) ->
+ % this little dance is because Mod might be unloaded at any point
+ case (catch {ok, Mod:intercept(M, C, St)}) of
+ {ok, R} -> validate_response(Mod, M, C, R);
+ {'EXIT', {undef, [{Mod, intercept, _, _} | _]}} -> {M, C}
+ end.
+
+validate_response(Mod, M1, C1, R = {M2, C2}) ->
+ case {validate_method(M1, M2), validate_content(C1, C2)} of
+ {true, true} -> R;
+ {false, _} ->
+ internal_error("Interceptor: ~p expected to return "
+ "method: ~p but returned: ~p",
+ [Mod, rabbit_misc:method_record_type(M1),
+ rabbit_misc:method_record_type(M2)]);
+ {_, false} ->
+ internal_error("Interceptor: ~p expected to return "
+ "content iff content is provided but "
+ "content in = ~p; content out = ~p",
+ [Mod, C1, C2])
+ end.
+
+validate_method(M, M2) ->
+ rabbit_misc:method_record_type(M) =:= rabbit_misc:method_record_type(M2).
+
+validate_content(none, none) -> true;
+validate_content(#content{}, #content{}) -> true;
+validate_content(_, _) -> false.
+
+%% keep dialyzer happy
+-spec internal_error(string(), [any()]) -> no_return().
+internal_error(Format, Args) ->
+ rabbit_misc:protocol_error(internal_error, Format, Args).
diff --git a/deps/rabbit/src/rabbit_channel_sup.erl b/deps/rabbit/src/rabbit_channel_sup.erl
new file mode 100644
index 0000000000..0d405ad3a7
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel_sup.erl
@@ -0,0 +1,92 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_sup).
+
+%% Supervises processes that implement AMQP 0-9-1 channels:
+%%
+%% * Channel process itself
+%% * Network writer (for network connections)
+%% * Limiter (handles channel QoS and flow control)
+%%
+%% Every rabbit_channel_sup is supervised by rabbit_channel_sup_sup.
+%%
+%% See also rabbit_channel, rabbit_writer, rabbit_limiter.
+
+-behaviour(supervisor2).
+
+-export([start_link/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-export_type([start_link_args/0]).
+
+-type start_link_args() ::
+ {'tcp', rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), pid(), string(), rabbit_types:protocol(),
+ rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
+ pid()} |
+ {'direct', rabbit_channel:channel_number(), pid(), string(),
+ rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
+ rabbit_framing:amqp_table(), pid()}.
+
+-define(FAIR_WAIT, 70000).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(start_link_args()) -> {'ok', pid(), {pid(), any()}}.
+
+start_link({tcp, Sock, Channel, FrameMax, ReaderPid, ConnName, Protocol, User,
+ VHost, Capabilities, Collector}) ->
+ {ok, SupPid} = supervisor2:start_link(
+ ?MODULE, {tcp, Sock, Channel, FrameMax,
+ ReaderPid, Protocol, {ConnName, Channel}}),
+ [LimiterPid] = supervisor2:find_child(SupPid, limiter),
+ [WriterPid] = supervisor2:find_child(SupPid, writer),
+ {ok, ChannelPid} =
+ supervisor2:start_child(
+ SupPid,
+ {channel, {rabbit_channel, start_link,
+ [Channel, ReaderPid, WriterPid, ReaderPid, ConnName,
+ Protocol, User, VHost, Capabilities, Collector,
+ LimiterPid]},
+ intrinsic, ?FAIR_WAIT, worker, [rabbit_channel]}),
+ {ok, AState} = rabbit_command_assembler:init(Protocol),
+ {ok, SupPid, {ChannelPid, AState}};
+start_link({direct, Channel, ClientChannelPid, ConnPid, ConnName, Protocol,
+ User, VHost, Capabilities, Collector, AmqpParams}) ->
+ {ok, SupPid} = supervisor2:start_link(
+ ?MODULE, {direct, {ConnName, Channel}}),
+ [LimiterPid] = supervisor2:find_child(SupPid, limiter),
+ {ok, ChannelPid} =
+ supervisor2:start_child(
+ SupPid,
+ {channel, {rabbit_channel, start_link,
+ [Channel, ClientChannelPid, ClientChannelPid, ConnPid,
+ ConnName, Protocol, User, VHost, Capabilities, Collector,
+ LimiterPid, AmqpParams]},
+ intrinsic, ?FAIR_WAIT, worker, [rabbit_channel]}),
+ {ok, SupPid, {ChannelPid, none}}.
+
+%%----------------------------------------------------------------------------
+
+init(Type) ->
+ ?LG_PROCESS_TYPE(channel_sup),
+ {ok, {{one_for_all, 0, 1}, child_specs(Type)}}.
+
+child_specs({tcp, Sock, Channel, FrameMax, ReaderPid, Protocol, Identity}) ->
+ [{writer, {rabbit_writer, start_link,
+ [Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, true]},
+ intrinsic, ?FAIR_WAIT, worker, [rabbit_writer]}
+ | child_specs({direct, Identity})];
+child_specs({direct, Identity}) ->
+ [{limiter, {rabbit_limiter, start_link, [Identity]},
+ transient, ?FAIR_WAIT, worker, [rabbit_limiter]}].
diff --git a/deps/rabbit/src/rabbit_channel_sup_sup.erl b/deps/rabbit/src/rabbit_channel_sup_sup.erl
new file mode 100644
index 0000000000..72cf38d6c8
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel_sup_sup.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_sup_sup).
+
+%% Supervisor for AMQP 0-9-1 channels. Every AMQP 0-9-1 connection has
+%% one of these.
+%%
+%% See also rabbit_channel_sup, rabbit_connection_helper_sup, rabbit_reader.
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_channel/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+-spec start_channel(pid(), rabbit_channel_sup:start_link_args()) ->
+ {'ok', pid(), {pid(), any()}}.
+
+start_channel(Pid, Args) ->
+ supervisor2:start_child(Pid, [Args]).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ ?LG_PROCESS_TYPE(channel_sup_sup),
+ {ok, {{simple_one_for_one, 0, 1},
+ [{channel_sup, {rabbit_channel_sup, start_link, []},
+ temporary, infinity, supervisor, [rabbit_channel_sup]}]}}.
diff --git a/deps/rabbit/src/rabbit_channel_tracking.erl b/deps/rabbit/src/rabbit_channel_tracking.erl
new file mode 100644
index 0000000000..42ab664a06
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel_tracking.erl
@@ -0,0 +1,291 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_tracking).
+
+%% Abstracts away how tracked channel records are stored
+%% and queried.
+%%
+%% See also:
+%%
+%% * rabbit_channel_tracking_handler
+%% * rabbit_reader
+%% * rabbit_event
+-behaviour(rabbit_tracking).
+
+-export([boot/0,
+ update_tracked/1,
+ handle_cast/1,
+ register_tracked/1,
+ unregister_tracked/1,
+ count_tracked_items_in/1,
+ clear_tracking_tables/0,
+ shutdown_tracked_items/2]).
+
+-export([list/0, list_of_user/1, list_on_node/1,
+ tracked_channel_table_name_for/1,
+ tracked_channel_per_user_table_name_for/1,
+ get_all_tracked_channel_table_names_for_node/1,
+ delete_tracked_channel_user_entry/1]).
+
+-include_lib("rabbit.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+%%
+%% API
+%%
+
+%% Sets up and resets channel tracking tables for this node.
+-spec boot() -> ok.
+
+boot() ->
+ ensure_tracked_channels_table_for_this_node(),
+ rabbit_log:info("Setting up a table for channel tracking on this node: ~p",
+ [tracked_channel_table_name_for(node())]),
+ ensure_per_user_tracked_channels_table_for_node(),
+ rabbit_log:info("Setting up a table for per-user channel tracking on this node: ~p",
+ [tracked_channel_per_user_table_name_for(node())]),
+ clear_tracking_tables(),
+ ok.
+
+-spec update_tracked(term()) -> ok.
+
+update_tracked(Event) ->
+ spawn(?MODULE, handle_cast, [Event]),
+ ok.
+
+%% Asynchronously handle update events
+-spec handle_cast(term()) -> ok.
+
+handle_cast({channel_created, Details}) ->
+ ThisNode = node(),
+ case node(pget(pid, Details)) of
+ ThisNode ->
+ TrackedCh = #tracked_channel{id = TrackedChId} =
+ tracked_channel_from_channel_created_event(Details),
+ try
+ register_tracked(TrackedCh)
+ catch
+ error:{no_exists, _} ->
+ Msg = "Could not register channel ~p for tracking, "
+ "its table is not ready yet or the channel terminated prematurely",
+ rabbit_log_connection:warning(Msg, [TrackedChId]),
+ ok;
+ error:Err ->
+ Msg = "Could not register channel ~p for tracking: ~p",
+ rabbit_log_connection:warning(Msg, [TrackedChId, Err]),
+ ok
+ end;
+ _OtherNode ->
+ %% ignore
+ ok
+ end;
+handle_cast({channel_closed, Details}) ->
+ %% channel has terminated, unregister iff local
+ case get_tracked_channel_by_pid(pget(pid, Details)) of
+ [#tracked_channel{name = Name}] ->
+ unregister_tracked(rabbit_tracking:id(node(), Name));
+ _Other -> ok
+ end;
+handle_cast({connection_closed, ConnDetails}) ->
+ ThisNode = node(),
+ ConnPid = pget(pid, ConnDetails),
+
+ case pget(node, ConnDetails) of
+ ThisNode ->
+ TrackedChs = get_tracked_channels_by_connection_pid(ConnPid),
+ rabbit_log_connection:info(
+ "Closing all channels from connection '~p' "
+ "because it has been closed", [pget(name, ConnDetails)]),
+ %% Shutting down channels will take care of unregistering the
+ %% corresponding tracking.
+ shutdown_tracked_items(TrackedChs, undefined),
+ ok;
+ _DifferentNode ->
+ ok
+ end;
+handle_cast({user_deleted, Details}) ->
+ Username = pget(name, Details),
+ %% Schedule user entry deletion, allowing time for connections to close
+ _ = timer:apply_after(?TRACKING_EXECUTION_TIMEOUT, ?MODULE,
+ delete_tracked_channel_user_entry, [Username]),
+ ok;
+handle_cast({node_deleted, Details}) ->
+ Node = pget(node, Details),
+ rabbit_log_connection:info(
+ "Node '~s' was removed from the cluster, deleting"
+ " its channel tracking tables...", [Node]),
+ delete_tracked_channels_table_for_node(Node),
+ delete_per_user_tracked_channels_table_for_node(Node).
+
+-spec register_tracked(rabbit_types:tracked_channel()) -> ok.
+-dialyzer([{nowarn_function, [register_tracked/1]}, race_conditions]).
+
+register_tracked(TrackedCh =
+ #tracked_channel{node = Node, name = Name, username = Username}) ->
+ ChId = rabbit_tracking:id(Node, Name),
+ TableName = tracked_channel_table_name_for(Node),
+ PerUserChTableName = tracked_channel_per_user_table_name_for(Node),
+ %% upsert
+ case mnesia:dirty_read(TableName, ChId) of
+ [] ->
+ mnesia:dirty_write(TableName, TrackedCh),
+ mnesia:dirty_update_counter(PerUserChTableName, Username, 1);
+ [#tracked_channel{}] ->
+ ok
+ end,
+ ok.
+
+-spec unregister_tracked(rabbit_types:tracked_channel_id()) -> ok.
+
+unregister_tracked(ChId = {Node, _Name}) when Node =:= node() ->
+ TableName = tracked_channel_table_name_for(Node),
+ PerUserChannelTableName = tracked_channel_per_user_table_name_for(Node),
+ case mnesia:dirty_read(TableName, ChId) of
+ [] -> ok;
+ [#tracked_channel{username = Username}] ->
+ mnesia:dirty_update_counter(PerUserChannelTableName, Username, -1),
+ mnesia:dirty_delete(TableName, ChId)
+ end.
+
+-spec count_tracked_items_in({atom(), rabbit_types:username()}) -> non_neg_integer().
+
+count_tracked_items_in({user, Username}) ->
+ rabbit_tracking:count_tracked_items(
+ fun tracked_channel_per_user_table_name_for/1,
+ #tracked_channel_per_user.channel_count, Username,
+ "channels of user").
+
+-spec clear_tracking_tables() -> ok.
+
+clear_tracking_tables() ->
+ clear_tracked_channel_tables_for_this_node(),
+ ok.
+
+-spec shutdown_tracked_items(list(), term()) -> ok.
+
+shutdown_tracked_items(TrackedItems, _Args) ->
+ close_channels(TrackedItems).
+
+%% helper functions
+-spec list() -> [rabbit_types:tracked_channel()].
+
+list() ->
+ lists:foldl(
+ fun (Node, Acc) ->
+ Tab = tracked_channel_table_name_for(Node),
+ Acc ++ mnesia:dirty_match_object(Tab, #tracked_channel{_ = '_'})
+ end, [], rabbit_nodes:all_running()).
+
+-spec list_of_user(rabbit_types:username()) -> [rabbit_types:tracked_channel()].
+
+list_of_user(Username) ->
+ rabbit_tracking:match_tracked_items(
+ fun tracked_channel_table_name_for/1,
+ #tracked_channel{username = Username, _ = '_'}).
+
+-spec list_on_node(node()) -> [rabbit_types:tracked_channel()].
+
+list_on_node(Node) ->
+ try mnesia:dirty_match_object(
+ tracked_channel_table_name_for(Node),
+ #tracked_channel{_ = '_'})
+ catch exit:{aborted, {no_exists, _}} -> []
+ end.
+
+-spec tracked_channel_table_name_for(node()) -> atom().
+
+tracked_channel_table_name_for(Node) ->
+ list_to_atom(rabbit_misc:format("tracked_channel_on_node_~s", [Node])).
+
+-spec tracked_channel_per_user_table_name_for(node()) -> atom().
+
+tracked_channel_per_user_table_name_for(Node) ->
+ list_to_atom(rabbit_misc:format(
+ "tracked_channel_table_per_user_on_node_~s", [Node])).
+
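+%% For illustration: for a node named rabbit@warp10 (hypothetical), the
+%% helpers above yield per-node table names such as
+%%
+%%   tracked_channel_on_node_rabbit@warp10
+%%   tracked_channel_table_per_user_on_node_rabbit@warp10
+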
+%% internal
+ensure_tracked_channels_table_for_this_node() ->
+ ensure_tracked_channels_table_for_node(node()).
+
+ensure_per_user_tracked_channels_table_for_node() ->
+ ensure_per_user_tracked_channels_table_for_node(node()).
+
+%% Create tables
+ensure_tracked_channels_table_for_node(Node) ->
+ TableName = tracked_channel_table_name_for(Node),
+ case mnesia:create_table(TableName, [{record_name, tracked_channel},
+ {attributes, record_info(fields, tracked_channel)}]) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to create a tracked channel table for node ~p: ~p", [Node, Error]),
+ ok
+ end.
+
+ensure_per_user_tracked_channels_table_for_node(Node) ->
+ TableName = tracked_channel_per_user_table_name_for(Node),
+ case mnesia:create_table(TableName, [{record_name, tracked_channel_per_user},
+ {attributes, record_info(fields, tracked_channel_per_user)}]) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to create a per-user tracked channel table for node ~p: ~p", [Node, Error]),
+ ok
+ end.
+
+clear_tracked_channel_tables_for_this_node() ->
+ [rabbit_tracking:clear_tracking_table(T)
+ || T <- get_all_tracked_channel_table_names_for_node(node())].
+
+delete_tracked_channels_table_for_node(Node) ->
+ TableName = tracked_channel_table_name_for(Node),
+ rabbit_tracking:delete_tracking_table(TableName, Node, "tracked channel").
+
+delete_per_user_tracked_channels_table_for_node(Node) ->
+ TableName = tracked_channel_per_user_table_name_for(Node),
+ rabbit_tracking:delete_tracking_table(TableName, Node,
+ "per-user tracked channels").
+
+get_all_tracked_channel_table_names_for_node(Node) ->
+ [tracked_channel_table_name_for(Node),
+ tracked_channel_per_user_table_name_for(Node)].
+
+get_tracked_channels_by_connection_pid(ConnPid) ->
+ rabbit_tracking:match_tracked_items(
+ fun tracked_channel_table_name_for/1,
+ #tracked_channel{connection = ConnPid, _ = '_'}).
+
+get_tracked_channel_by_pid(ChPid) ->
+ rabbit_tracking:match_tracked_items(
+ fun tracked_channel_table_name_for/1,
+ #tracked_channel{pid = ChPid, _ = '_'}).
+
+delete_tracked_channel_user_entry(Username) ->
+ rabbit_tracking:delete_tracked_entry(
+ {rabbit_auth_backend_internal, exists, [Username]},
+ fun tracked_channel_per_user_table_name_for/1,
+ Username).
+
+tracked_channel_from_channel_created_event(ChannelDetails) ->
+ Node = node(ChPid = pget(pid, ChannelDetails)),
+ Name = pget(name, ChannelDetails),
+ #tracked_channel{
+ id = rabbit_tracking:id(Node, Name),
+ name = Name,
+ node = Node,
+ vhost = pget(vhost, ChannelDetails),
+ pid = ChPid,
+ connection = pget(connection, ChannelDetails),
+ username = pget(user, ChannelDetails)}.
+
+close_channels(TrackedChannels = [#tracked_channel{}|_]) ->
+ [rabbit_channel:shutdown(ChPid)
+ || #tracked_channel{pid = ChPid} <- TrackedChannels],
+ ok;
+close_channels(_TrackedChannels = []) -> ok.
diff --git a/deps/rabbit/src/rabbit_channel_tracking_handler.erl b/deps/rabbit/src/rabbit_channel_tracking_handler.erl
new file mode 100644
index 0000000000..0cbe02f39e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel_tracking_handler.erl
@@ -0,0 +1,71 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_tracking_handler).
+
+%% This module keeps track of channel creation and termination events
+%% on its local node. Similar to the rabbit_connection_tracking_handler,
+%% the primary goal here is to decouple channel tracking from rabbit_reader
+%% and isolate channel tracking to its own process to avoid blocking connection
+%% creation events. Additionally, creation events are also non-blocking in that
+%% they spawn a short-lived process for updating the tracking tables in real time.
+%%
+%% Events from other nodes are ignored.
+
+-behaviour(gen_event).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-include_lib("rabbit.hrl").
+
+-rabbit_boot_step({?MODULE,
+ [{description, "channel tracking event handler"},
+ {mfa, {gen_event, add_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {cleanup, {gen_event, delete_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {requires, [channel_tracking]},
+ {enables, recovery}]}).
+
+%%
+%% API
+%%
+
+init([]) ->
+ {ok, []}.
+
+handle_event(#event{type = channel_created, props = Details}, State) ->
+ ok = rabbit_channel_tracking:update_tracked({channel_created, Details}),
+ {ok, State};
+handle_event(#event{type = channel_closed, props = Details}, State) ->
+ ok = rabbit_channel_tracking:update_tracked({channel_closed, Details}),
+ {ok, State};
+handle_event(#event{type = connection_closed, props = Details}, State) ->
+ ok = rabbit_channel_tracking:update_tracked({connection_closed, Details}),
+ {ok, State};
+handle_event(#event{type = user_deleted, props = Details}, State) ->
+ ok = rabbit_channel_tracking:update_tracked({user_deleted, Details}),
+ {ok, State};
+%% A node has been deleted from the cluster.
+handle_event(#event{type = node_deleted, props = Details}, State) ->
+ ok = rabbit_channel_tracking:update_tracked({node_deleted, Details}),
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/rabbit_classic_queue.erl b/deps/rabbit/src/rabbit_classic_queue.erl
new file mode 100644
index 0000000000..e53c0aecc2
--- /dev/null
+++ b/deps/rabbit/src/rabbit_classic_queue.erl
@@ -0,0 +1,527 @@
+-module(rabbit_classic_queue).
+-behaviour(rabbit_queue_type).
+
+-include("amqqueue.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-record(msg_status, {pending :: [pid()],
+ confirmed = [] :: [pid()]}).
+
+-record(?MODULE, {pid :: undefined | pid(), %% the current master pid
+ qref :: term(), %% TODO
+ unconfirmed = #{} ::
+ #{non_neg_integer() => #msg_status{}}}).
+-define(STATE, ?MODULE).
+
+-opaque state() :: #?STATE{}.
+
+-export_type([state/0]).
+
+-export([
+ is_enabled/0,
+ declare/2,
+ delete/4,
+ is_recoverable/1,
+ recover/2,
+ purge/1,
+ policy_changed/1,
+ stat/1,
+ init/1,
+ close/1,
+ update/2,
+ consume/3,
+ cancel/5,
+ handle_event/2,
+ deliver/2,
+ settle/4,
+ credit/4,
+ dequeue/4,
+ info/2,
+ state_info/1,
+ capabilities/0
+ ]).
+
+-export([delete_crashed/1,
+ delete_crashed/2,
+ delete_crashed_internal/2]).
+
+-export([confirm_to_sender/3,
+ send_rejection/3,
+ send_queue_event/3]).
+
+is_enabled() -> true.
+
+declare(Q, Node) when ?amqqueue_is_classic(Q) ->
+ QName = amqqueue:get_name(Q),
+ VHost = amqqueue:get_vhost(Q),
+ Node1 = case Node of
+ {ignore_location, Node0} ->
+ Node0;
+ _ ->
+ case rabbit_queue_master_location_misc:get_location(Q) of
+ {ok, Node0} -> Node0;
+ _ -> Node
+ end
+ end,
+ Node1 = rabbit_mirror_queue_misc:initial_queue_node(Q, Node1),
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost, Node1) of
+ {ok, _} ->
+ gen_server2:call(
+ rabbit_amqqueue_sup_sup:start_queue_process(Node1, Q, declare),
+ {init, new}, infinity);
+ {error, Error} ->
+ {protocol_error, internal_error, "Cannot declare a queue '~s' on node '~s': ~255p",
+ [rabbit_misc:rs(QName), Node1, Error]}
+ end.
+
+delete(Q, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q) ->
+ case wait_for_promoted_or_stopped(Q) of
+ {promoted, Q1} ->
+ QPid = amqqueue:get_pid(Q1),
+ delegate:invoke(QPid, {gen_server2, call,
+ [{delete, IfUnused, IfEmpty, ActingUser},
+ infinity]});
+ {stopped, Q1} ->
+ #resource{name = Name, virtual_host = Vhost} = amqqueue:get_name(Q1),
+ case IfEmpty of
+ true ->
+ rabbit_log:error("Queue ~s in vhost ~s has its master node down and "
+ "no mirrors available or eligible for promotion. "
+ "The queue may be non-empty. "
+ "Refusing to force-delete.",
+ [Name, Vhost]),
+ {error, not_empty};
+ false ->
+ rabbit_log:warning("Queue ~s in vhost ~s has its master node down and "
+ "no mirrors available or eligible for promotion. "
+ "Forcing queue deletion.",
+ [Name, Vhost]),
+ delete_crashed_internal(Q1, ActingUser),
+ {ok, 0}
+ end;
+ {error, not_found} ->
+ %% Assume the queue was deleted
+ {ok, 0}
+ end.
+
+is_recoverable(Q) when ?is_amqqueue(Q) ->
+ Node = node(),
+ Node =:= node(amqqueue:get_pid(Q)) andalso
+ %% Terminations on node down will not remove the rabbit_queue
+ %% record if it is a mirrored queue (such info is now obtained from
+ %% the policy). Thus, we must check if the local pid is alive
+ %% - if the record is present - in order to restart.
+ (mnesia:read(rabbit_queue, amqqueue:get_name(Q), read) =:= []
+ orelse not rabbit_mnesia:is_process_alive(amqqueue:get_pid(Q))).
+
+recover(VHost, Queues) ->
+ {ok, BQ} = application:get_env(rabbit, backing_queue_module),
+ %% We rely on BQ:start/1 returning the recovery terms in the same
+ %% order as the supplied queue names, so that we can zip them together
+ %% for further processing in recover_durable_queues.
+ {ok, OrderedRecoveryTerms} =
+ BQ:start(VHost, [amqqueue:get_name(Q) || Q <- Queues]),
+ case rabbit_amqqueue_sup_sup:start_for_vhost(VHost) of
+ {ok, _} ->
+ RecoveredQs = recover_durable_queues(lists:zip(Queues,
+ OrderedRecoveryTerms)),
+ RecoveredNames = [amqqueue:get_name(Q) || Q <- RecoveredQs],
+ FailedQueues = [Q || Q <- Queues,
+ not lists:member(amqqueue:get_name(Q), RecoveredNames)],
+ {RecoveredQs, FailedQueues};
+ {error, Reason} ->
+ rabbit_log:error("Failed to start queue supervisor for vhost '~s': ~s", [VHost, Reason]),
+ throw({error, Reason})
+ end.
+
+-spec policy_changed(amqqueue:amqqueue()) -> ok.
+policy_changed(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ gen_server2:cast(QPid, policy_changed).
+
+stat(Q) ->
+ delegate:invoke(amqqueue:get_pid(Q),
+ {gen_server2, call, [stat, infinity]}).
+
+-spec init(amqqueue:amqqueue()) -> state().
+init(Q) when ?amqqueue_is_classic(Q) ->
+ QName = amqqueue:get_name(Q),
+ #?STATE{pid = amqqueue:get_pid(Q),
+ qref = QName}.
+
+-spec close(state()) -> ok.
+close(_State) ->
+ ok.
+
+-spec update(amqqueue:amqqueue(), state()) -> state().
+update(Q, #?STATE{pid = Pid} = State) when ?amqqueue_is_classic(Q) ->
+ case amqqueue:get_pid(Q) of
+ Pid ->
+ State;
+ NewPid ->
+ %% master pid is different, update
+ State#?STATE{pid = NewPid}
+ end.
+
+consume(Q, Spec, State) when ?amqqueue_is_classic(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ QRef = amqqueue:get_name(Q),
+ #{no_ack := NoAck,
+ channel_pid := ChPid,
+ limiter_pid := LimiterPid,
+ limiter_active := LimiterActive,
+ prefetch_count := ConsumerPrefetchCount,
+ consumer_tag := ConsumerTag,
+ exclusive_consume := ExclusiveConsume,
+ args := Args,
+ ok_msg := OkMsg,
+ acting_user := ActingUser} = Spec,
+ case delegate:invoke(QPid,
+ {gen_server2, call,
+ [{basic_consume, NoAck, ChPid, LimiterPid,
+ LimiterActive, ConsumerPrefetchCount, ConsumerTag,
+ ExclusiveConsume, Args, OkMsg, ActingUser},
+ infinity]}) of
+ ok ->
+ %% ask the host process to monitor this pid
+ %% TODO: track pids as they change
+ {ok, State#?STATE{pid = QPid}, [{monitor, QPid, QRef}]};
+ Err ->
+ Err
+ end.
+
+cancel(Q, ConsumerTag, OkMsg, ActingUser, State) ->
+ QPid = amqqueue:get_pid(Q),
+ case delegate:invoke(QPid, {gen_server2, call,
+ [{basic_cancel, self(), ConsumerTag,
+ OkMsg, ActingUser}, infinity]}) of
+ ok ->
+ {ok, State};
+ Err -> Err
+ end.
+
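+%% Queue type callback: settle (ack) or reject/requeue delivered messages on
+%% the backing queue process. Both operations are casts, so no reply is
+%% awaited from the queue.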
+-spec settle(rabbit_queue_type:settle_op(), rabbit_types:ctag(),
+ [non_neg_integer()], state()) ->
+ {state(), rabbit_queue_type:actions()}.
+settle(complete, _CTag, MsgIds, State) ->
+ Pid = State#?STATE.pid,
+ delegate:invoke_no_result(Pid,
+ {gen_server2, cast, [{ack, MsgIds, self()}]}),
+ {State, []};
+settle(Op, _CTag, MsgIds, State) ->
+ ChPid = self(),
+ ok = delegate:invoke_no_result(State#?STATE.pid,
+ {gen_server2, cast,
+ [{reject, Op == requeue, MsgIds, ChPid}]}),
+ {State, []}.
+
+credit(CTag, Credit, Drain, State) ->
+ ChPid = self(),
+ delegate:invoke_no_result(State#?STATE.pid,
+ {gen_server2, cast,
+ [{credit, ChPid, CTag, Credit, Drain}]}),
+ {State, []}.
+
+handle_event({confirm, MsgSeqNos, Pid}, #?STATE{qref = QRef,
+ unconfirmed = U0} = State) ->
+ %% confirms should never result in rejections
+ {Unconfirmed, ConfirmedSeqNos, []} =
+ settle_seq_nos(MsgSeqNos, Pid, U0, confirm),
+ Actions = [{settled, QRef, ConfirmedSeqNos}],
+    %% Handle a confirm event coming from a queue process.
+    %% In this case the classic queue should track each individual publish and
+    %% the processes involved, and only emit a settle action once they have all
+    %% been received (or a DOWN has been received).
+    %% Hence this part of the confirm logic is queue-type specific.
+ {ok, State#?STATE{unconfirmed = Unconfirmed}, Actions};
+handle_event({reject_publish, SeqNo, _QPid},
+ #?STATE{qref = QRef,
+ unconfirmed = U0} = State) ->
+ %% It does not matter which queue rejected the message,
+ %% if any queue did, it should not be confirmed.
+ {U, Rejected} = reject_seq_no(SeqNo, U0),
+ Actions = [{rejected, QRef, Rejected}],
+ {ok, State#?STATE{unconfirmed = U}, Actions};
+handle_event({down, Pid, Info}, #?STATE{qref = QRef,
+ pid = MasterPid,
+ unconfirmed = U0} = State0) ->
+ Actions0 = case Pid =:= MasterPid of
+ true ->
+ [{queue_down, QRef}];
+ false ->
+ []
+ end,
+ case rabbit_misc:is_abnormal_exit(Info) of
+ false when Info =:= normal andalso Pid == MasterPid ->
+            %% the queue was deleted and the master pid is down
+ eol;
+ false ->
+ %% this assumes the mirror isn't part of the active set
+ MsgSeqNos = maps:keys(
+ maps:filter(fun (_, #msg_status{pending = Pids}) ->
+ lists:member(Pid, Pids)
+ end, U0)),
+ {Unconfirmed, Settled, Rejected} =
+ settle_seq_nos(MsgSeqNos, Pid, U0, down),
+ Actions = settlement_action(
+ settled, QRef, Settled,
+ settlement_action(rejected, QRef, Rejected, Actions0)),
+ {ok, State0#?STATE{unconfirmed = Unconfirmed}, Actions};
+ true ->
+ %% any abnormal exit should be considered a full reject of the
+            %% outstanding message ids - if the message didn't get to all
+ %% mirrors we have to assume it will never get there
+ MsgIds = maps:fold(
+ fun (SeqNo, Status, Acc) ->
+ case lists:member(Pid, Status#msg_status.pending) of
+ true ->
+ [SeqNo | Acc];
+ false ->
+ Acc
+ end
+ end, [], U0),
+ U = maps:without(MsgIds, U0),
+ {ok, State0#?STATE{unconfirmed = U},
+ [{rejected, QRef, MsgIds} | Actions0]}
+ end;
+handle_event({send_credit_reply, _} = Action, State) ->
+ {ok, State, [Action]}.
+
+settlement_action(_Type, _QRef, [], Acc) ->
+ Acc;
+settlement_action(Type, QRef, MsgSeqs, Acc) ->
+ [{Type, QRef, MsgSeqs} | Acc].
+
+-spec deliver([{amqqueue:amqqueue(), state()}],
+ Delivery :: term()) ->
+ {[{amqqueue:amqqueue(), state()}], rabbit_queue_type:actions()}.
+deliver(Qs0, #delivery{flow = Flow,
+ msg_seq_no = MsgNo,
+ message = #basic_message{exchange_name = _Ex},
+ confirm = _Confirm} = Delivery) ->
+ %% TODO: record master and slaves for confirm processing
+ {MPids, SPids, Qs, Actions} = qpids(Qs0, MsgNo),
+ QPids = MPids ++ SPids,
+ case Flow of
+ %% Here we are tracking messages sent by the rabbit_channel
+ %% process. We are accessing the rabbit_channel process
+ %% dictionary.
+ flow -> [credit_flow:send(QPid) || QPid <- QPids],
+ [credit_flow:send(QPid) || QPid <- SPids];
+ noflow -> ok
+ end,
+ MMsg = {deliver, Delivery, false},
+ SMsg = {deliver, Delivery, true},
+ delegate:invoke_no_result(MPids, {gen_server2, cast, [MMsg]}),
+ delegate:invoke_no_result(SPids, {gen_server2, cast, [SMsg]}),
+ {Qs, Actions}.
+
+
+-spec dequeue(NoAck :: boolean(), LimiterPid :: pid(),
+ rabbit_types:ctag(), state()) ->
+ {ok, Count :: non_neg_integer(), rabbit_amqqueue:qmsg(), state()} |
+ {empty, state()}.
+dequeue(NoAck, LimiterPid, _CTag, State) ->
+ QPid = State#?STATE.pid,
+ case delegate:invoke(QPid, {gen_server2, call,
+ [{basic_get, self(), NoAck, LimiterPid}, infinity]}) of
+ empty ->
+ {empty, State};
+ {ok, Count, Msg} ->
+ {ok, Count, Msg, State}
+ end.
+
+-spec state_info(state()) -> #{atom() := term()}.
+state_info(_State) ->
+ #{}.
+
+%% general queue info
+-spec info(amqqueue:amqqueue(), all_keys | rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+info(Q, Items) ->
+ QPid = amqqueue:get_pid(Q),
+ Req = case Items of
+ all_keys -> info;
+ _ -> {info, Items}
+ end,
+ case delegate:invoke(QPid, {gen_server2, call, [Req, infinity]}) of
+ {ok, Result} ->
+ Result;
+ {error, _Err} ->
+ [];
+ Result when is_list(Result) ->
+ %% this is a backwards compatibility clause
+ Result
+ end.
+
+-spec purge(amqqueue:amqqueue()) ->
+ {ok, non_neg_integer()}.
+purge(Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ delegate:invoke(QPid, {gen_server2, call, [purge, infinity]}).
+
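+%% Collects the master and mirror pids of each target queue, records the
+%% publish under `unconfirmed` (when MsgNo is defined) and emits a monitor
+%% action for every pid involved.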
+qpids(Qs, MsgNo) ->
+ lists:foldl(
+ fun ({Q, S0}, {MPidAcc, SPidAcc, Qs0, Actions0}) ->
+ QPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ QRef = amqqueue:get_name(Q),
+ Actions = [{monitor, QPid, QRef}
+ | [{monitor, P, QRef} || P <- SPids]] ++ Actions0,
+          %% only record the publish for confirm tracking if MsgNo isn't undefined
+ S = case S0 of
+ #?STATE{unconfirmed = U0} ->
+ Rec = [QPid | SPids],
+ U = case MsgNo of
+ undefined ->
+ U0;
+ _ ->
+ U0#{MsgNo => #msg_status{pending = Rec}}
+ end,
+ S0#?STATE{pid = QPid,
+ unconfirmed = U};
+ stateless ->
+ S0
+ end,
+ {[QPid | MPidAcc], SPidAcc ++ SPids,
+ [{Q, S} | Qs0], Actions}
+ end, {[], [], [], []}, Qs).
+
+%% internal-ish
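+%% Polls until the queue's master process is alive again ({promoted, Q}) or
+%% no mirror is left that could be promoted ({stopped, Q}), sleeping 100ms
+%% between lookups.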
+-spec wait_for_promoted_or_stopped(amqqueue:amqqueue()) ->
+ {promoted, amqqueue:amqqueue()} |
+ {stopped, amqqueue:amqqueue()} |
+ {error, not_found}.
+wait_for_promoted_or_stopped(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ QPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ case rabbit_mnesia:is_process_alive(QPid) of
+ true -> {promoted, Q};
+ false ->
+ case lists:any(fun(Pid) ->
+ rabbit_mnesia:is_process_alive(Pid)
+ end, SPids) of
+ %% There is a live slave. May be promoted
+ true ->
+ timer:sleep(100),
+ wait_for_promoted_or_stopped(Q);
+ %% All slave pids are stopped.
+ %% No process left for the queue
+ false -> {stopped, Q}
+ end
+ end;
+ {error, not_found} ->
+ {error, not_found}
+ end.
+
+-spec delete_crashed(amqqueue:amqqueue()) -> ok.
+delete_crashed(Q) ->
+ delete_crashed(Q, ?INTERNAL_USER).
+
+delete_crashed(Q, ActingUser) ->
+ ok = rpc:call(amqqueue:qnode(Q), ?MODULE, delete_crashed_internal,
+ [Q, ActingUser]).
+
+delete_crashed_internal(Q, ActingUser) ->
+ QName = amqqueue:get_name(Q),
+ {ok, BQ} = application:get_env(rabbit, backing_queue_module),
+ BQ:delete_crashed(Q),
+ ok = rabbit_amqqueue:internal_delete(QName, ActingUser).
+
+recover_durable_queues(QueuesAndRecoveryTerms) ->
+ {Results, Failures} =
+ gen_server2:mcall(
+ [{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q, recovery),
+ {init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]),
+ [rabbit_log:error("Queue ~p failed to initialise: ~p~n",
+ [Pid, Error]) || {Pid, Error} <- Failures],
+ [Q || {_, {new, Q}} <- Results].
+
+capabilities() ->
+ #{policies => [<<"expires">>, <<"message-ttl">>, <<"dead-letter-exchange">>,
+ <<"dead-letter-routing-key">>, <<"max-length">>,
+ <<"max-length-bytes">>, <<"max-in-memory-length">>, <<"max-in-memory-bytes">>,
+ <<"max-priority">>, <<"overflow">>, <<"queue-mode">>,
+ <<"single-active-consumer">>, <<"delivery-limit">>,
+ <<"ha-mode">>, <<"ha-params">>, <<"ha-sync-mode">>,
+ <<"ha-promote-on-shutdown">>, <<"ha-promote-on-failure">>,
+ <<"queue-master-locator">>],
+ queue_arguments => [<<"x-expires">>, <<"x-message-ttl">>, <<"x-dead-letter-exchange">>,
+ <<"x-dead-letter-routing-key">>, <<"x-max-length">>,
+ <<"x-max-length-bytes">>, <<"x-max-in-memory-length">>,
+ <<"x-max-in-memory-bytes">>, <<"x-max-priority">>,
+ <<"x-overflow">>, <<"x-queue-mode">>, <<"x-single-active-consumer">>,
+ <<"x-queue-type">>, <<"x-queue-master-locator">>],
+ consumer_arguments => [<<"x-cancel-on-ha-failover">>,
+ <<"x-priority">>, <<"x-credit">>
+ ],
+ server_named => true}.
+
+reject_seq_no(SeqNo, U0) ->
+ reject_seq_no(SeqNo, U0, []).
+
+reject_seq_no(SeqNo, U0, Acc) ->
+ case maps:take(SeqNo, U0) of
+ {_, U} ->
+ {U, [SeqNo | Acc]};
+ error ->
+ {U0, Acc}
+ end.
+
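+%% Folds over the given sequence numbers, removing Pid from each entry's
+%% pending set; returns the updated unconfirmed map plus the lists of
+%% confirmed and rejected sequence numbers.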
+settle_seq_nos(MsgSeqNos, Pid, U0, Reason) ->
+ lists:foldl(
+ fun (SeqNo, {U, C0, R0}) ->
+ case U of
+ #{SeqNo := Status0} ->
+ case update_msg_status(Reason, Pid, Status0) of
+ #msg_status{pending = [],
+ confirmed = []} ->
+ %% no pending left and nothing confirmed
+ %% then we reject it
+ {maps:remove(SeqNo, U), C0, [SeqNo | R0]};
+ #msg_status{pending = [],
+ confirmed = _} ->
+ %% this can be confirmed as there are no pending
+ %% and confirmed isn't empty
+ {maps:remove(SeqNo, U), [SeqNo | C0], R0};
+ MsgStatus ->
+ {U#{SeqNo => MsgStatus}, C0, R0}
+ end;
+ _ ->
+ {U, C0, R0}
+ end
+ end, {U0, [], []}, MsgSeqNos).
+
+update_msg_status(confirm, Pid, #msg_status{pending = P,
+ confirmed = C} = S) ->
+ Rem = lists:delete(Pid, P),
+ S#msg_status{pending = Rem, confirmed = [Pid | C]};
+update_msg_status(down, Pid, #msg_status{pending = P} = S) ->
+ S#msg_status{pending = lists:delete(Pid, P)}.
+
+%% part of channel <-> queue api
+confirm_to_sender(Pid, QName, MsgSeqNos) ->
+    %% the stream_queue feature flag shipped with the queue type refactoring
+    %% and thus requires a different message format
+ Evt = case rabbit_ff_registry:is_enabled(stream_queue) of
+ true ->
+ {queue_event, QName, {confirm, MsgSeqNos, self()}};
+ false ->
+ {confirm, MsgSeqNos, self()}
+ end,
+ gen_server2:cast(Pid, Evt).
+
+send_rejection(Pid, QName, MsgSeqNo) ->
+ case rabbit_ff_registry:is_enabled(stream_queue) of
+ true ->
+ gen_server2:cast(Pid, {queue_event, QName,
+ {reject_publish, MsgSeqNo, self()}});
+ false ->
+ gen_server2:cast(Pid, {reject_publish, MsgSeqNo, self()})
+ end.
+
+send_queue_event(Pid, QName, Evt) ->
+ gen_server2:cast(Pid, {queue_event, QName, Evt}).
diff --git a/deps/rabbit/src/rabbit_client_sup.erl b/deps/rabbit/src/rabbit_client_sup.erl
new file mode 100644
index 0000000000..a28e4ce39c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_client_sup.erl
@@ -0,0 +1,43 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_client_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/1, start_link/2, start_link_worker/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Callback) ->
+ supervisor2:start_link(?MODULE, Callback).
+
+-spec start_link({'local', atom()}, rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(SupName, Callback) ->
+ supervisor2:start_link(SupName, ?MODULE, Callback).
+
+-spec start_link_worker({'local', atom()}, rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link_worker(SupName, Callback) ->
+ supervisor2:start_link(SupName, ?MODULE, {Callback, worker}).
+
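+%% Children started via start_link/1,2 run as supervisors with an unlimited
+%% shutdown; children started via start_link_worker/2 run as workers with a
+%% shutdown bounded by ?WORKER_WAIT.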
+init({M,F,A}) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{client, {M,F,A}, temporary, infinity, supervisor, [M]}]}};
+init({{M,F,A}, worker}) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{client, {M,F,A}, temporary, ?WORKER_WAIT, worker, [M]}]}}.
diff --git a/deps/rabbit/src/rabbit_config.erl b/deps/rabbit/src/rabbit_config.erl
new file mode 100644
index 0000000000..1198035a7a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_config.erl
@@ -0,0 +1,46 @@
+-module(rabbit_config).
+
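+%% Thin wrapper around the prelaunch configuration state: reports which
+%% configuration files (rabbitmq.conf-style files and advanced.config) are
+%% in effect for this node.
+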
+-export([
+ config_files/0,
+ get_advanced_config/0
+ ]).
+
+-export([schema_dir/0]).
+-deprecated([{schema_dir, 0, eventually}]).
+
+-export_type([config_location/0]).
+
+-type config_location() :: string().
+
+get_confs() ->
+ case get_prelaunch_config_state() of
+ #{config_files := Confs} -> Confs;
+ _ -> []
+ end.
+
+schema_dir() ->
+ undefined.
+
+get_advanced_config() ->
+ case get_prelaunch_config_state() of
+ %% There can be only one advanced.config
+ #{config_advanced_file := FileName} when FileName =/= undefined ->
+ case rabbit_file:is_file(FileName) of
+ true -> FileName;
+ false -> none
+ end;
+ _ -> none
+ end.
+
+-spec config_files() -> [config_location()].
+config_files() ->
+ ConfFiles = [filename:absname(File) || File <- get_confs(),
+ filelib:is_regular(File)],
+ AdvancedFiles = case get_advanced_config() of
+ none -> [];
+ FileName -> [filename:absname(FileName)]
+ end,
+ AdvancedFiles ++ ConfFiles.
+
+get_prelaunch_config_state() ->
+ rabbit_prelaunch_conf:get_config_state().
diff --git a/deps/rabbit/src/rabbit_confirms.erl b/deps/rabbit/src/rabbit_confirms.erl
new file mode 100644
index 0000000000..2fe032d1f1
--- /dev/null
+++ b/deps/rabbit/src/rabbit_confirms.erl
@@ -0,0 +1,152 @@
+-module(rabbit_confirms).
+
+-compile({no_auto_import, [size/1]}).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([init/0,
+ insert/4,
+ confirm/3,
+ reject/2,
+
+ remove_queue/2,
+
+ smallest/1,
+ size/1,
+ is_empty/1]).
+
+-type seq_no() :: non_neg_integer().
+-type queue_name() :: rabbit_amqqueue:name().
+-type exchange_name() :: rabbit_exchange:name().
+
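+%% Tracks, per publishing channel, which queues still have to confirm each
+%% publish sequence number. The smallest outstanding sequence number is
+%% cached so callers do not need to scan the whole map to compute it.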
+-record(?MODULE, {smallest :: undefined | seq_no(),
+ unconfirmed = #{} :: #{seq_no() =>
+ {exchange_name(),
+ #{queue_name() => ok}}}
+ }).
+
+-type mx() :: {seq_no(), exchange_name()}.
+
+-opaque state() :: #?MODULE{}.
+
+-export_type([
+ state/0
+ ]).
+
+-spec init() -> state().
+init() ->
+ #?MODULE{}.
+
+-spec insert(seq_no(), [queue_name()], exchange_name(), state()) ->
+ state().
+insert(SeqNo, QNames, #resource{kind = exchange} = XName,
+ #?MODULE{smallest = S0,
+ unconfirmed = U0} = State)
+ when is_integer(SeqNo)
+ andalso is_list(QNames)
+ andalso is_map_key(SeqNo, U0) == false ->
+ U = U0#{SeqNo => {XName, maps:from_list([{Q, ok} || Q <- QNames])}},
+ S = case S0 of
+ undefined -> SeqNo;
+ _ -> S0
+ end,
+ State#?MODULE{smallest = S,
+ unconfirmed = U}.
+
+-spec confirm([seq_no()], queue_name(), state()) ->
+ {[mx()], state()}.
+confirm(SeqNos, QName, #?MODULE{smallest = Smallest0,
+ unconfirmed = U0} = State)
+ when is_list(SeqNos) ->
+ {Confirmed, U} = lists:foldr(
+ fun (SeqNo, Acc) ->
+ confirm_one(SeqNo, QName, Acc)
+ end, {[], U0}, SeqNos),
+ %% check if smallest is in Confirmed
+    %% TODO: this can be optimised by checking in the preceding foldr
+ Smallest =
+ case lists:any(fun ({S, _}) -> S == Smallest0 end, Confirmed) of
+ true ->
+ %% work out new smallest
+ next_smallest(Smallest0, U);
+ false ->
+ Smallest0
+ end,
+ {Confirmed, State#?MODULE{smallest = Smallest,
+ unconfirmed = U}}.
+
+-spec reject(seq_no(), state()) ->
+ {ok, mx(), state()} | {error, not_found}.
+reject(SeqNo, #?MODULE{smallest = Smallest0,
+ unconfirmed = U0} = State)
+ when is_integer(SeqNo) ->
+ case maps:take(SeqNo, U0) of
+ {{XName, _QS}, U} ->
+ Smallest = case SeqNo of
+ Smallest0 ->
+ %% need to scan as the smallest was removed
+ next_smallest(Smallest0, U);
+ _ ->
+ Smallest0
+ end,
+ {ok, {SeqNo, XName}, State#?MODULE{unconfirmed = U,
+ smallest = Smallest}};
+ error ->
+ {error, not_found}
+ end.
+
+%% idempotent
+-spec remove_queue(queue_name(), state()) ->
+ {[mx()], state()}.
+remove_queue(QName, #?MODULE{unconfirmed = U} = State) ->
+ SeqNos = maps:fold(
+ fun (SeqNo, {_XName, QS0}, Acc) ->
+ case maps:is_key(QName, QS0) of
+ true ->
+ [SeqNo | Acc];
+ false ->
+ Acc
+ end
+ end, [], U),
+    confirm(lists:sort(SeqNos), QName, State).
+
+-spec smallest(state()) -> seq_no() | undefined.
+smallest(#?MODULE{smallest = Smallest}) ->
+ Smallest.
+
+-spec size(state()) -> non_neg_integer().
+size(#?MODULE{unconfirmed = U}) ->
+ maps:size(U).
+
+-spec is_empty(state()) -> boolean().
+is_empty(State) ->
+ size(State) == 0.
+
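+%% Illustrative use of the API above (a sketch, assuming a publish routed to
+%% a single queue; QName and XName are placeholders):
+%%
+%%     S0 = rabbit_confirms:init(),
+%%     S1 = rabbit_confirms:insert(1, [QName], XName, S0),
+%%     {[{1, XName}], _S2} = rabbit_confirms:confirm([1], QName, S1).
+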
+%% INTERNAL
+
+confirm_one(SeqNo, QName, {Acc, U0}) ->
+ case maps:take(SeqNo, U0) of
+ {{XName, QS}, U1}
+ when is_map_key(QName, QS)
+ andalso map_size(QS) == 1 ->
+ %% last queue confirm
+ {[{SeqNo, XName} | Acc], U1};
+ {{XName, QS}, U1} ->
+ {Acc, U1#{SeqNo => {XName, maps:remove(QName, QS)}}};
+ error ->
+ {Acc, U0}
+ end.
+
+next_smallest(_S, U) when map_size(U) == 0 ->
+ undefined;
+next_smallest(S, U) when is_map_key(S, U) ->
+ S;
+next_smallest(S, U) ->
+ %% TODO: this is potentially infinitely recursive if called incorrectly
+ next_smallest(S+1, U).
+
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
diff --git a/deps/rabbit/src/rabbit_connection_helper_sup.erl b/deps/rabbit/src/rabbit_connection_helper_sup.erl
new file mode 100644
index 0000000000..d0509029fd
--- /dev/null
+++ b/deps/rabbit/src/rabbit_connection_helper_sup.erl
@@ -0,0 +1,57 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_connection_helper_sup).
+
+%% Supervises auxiliary processes of AMQP 0-9-1 connections:
+%%
+%% * Channel supervisor
+%% * Heartbeat receiver
+%% * Heartbeat sender
+%% * Exclusive queue collector
+%%
+%% See also rabbit_heartbeat, rabbit_channel_sup_sup, rabbit_queue_collector.
+
+-behaviour(supervisor2).
+
+-export([start_link/0]).
+-export([start_channel_sup_sup/1,
+ start_queue_collector/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+-spec start_channel_sup_sup(pid()) -> rabbit_types:ok_pid_or_error().
+
+start_channel_sup_sup(SupPid) ->
+ supervisor2:start_child(
+ SupPid,
+ {channel_sup_sup, {rabbit_channel_sup_sup, start_link, []},
+ intrinsic, infinity, supervisor, [rabbit_channel_sup_sup]}).
+
+-spec start_queue_collector(pid(), rabbit_types:proc_name()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_queue_collector(SupPid, Identity) ->
+ supervisor2:start_child(
+ SupPid,
+ {collector, {rabbit_queue_collector, start_link, [Identity]},
+ intrinsic, ?WORKER_WAIT, worker, [rabbit_queue_collector]}).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ ?LG_PROCESS_TYPE(connection_helper_sup),
+ {ok, {{one_for_one, 10, 10}, []}}.
diff --git a/deps/rabbit/src/rabbit_connection_sup.erl b/deps/rabbit/src/rabbit_connection_sup.erl
new file mode 100644
index 0000000000..c1d1bd0d77
--- /dev/null
+++ b/deps/rabbit/src/rabbit_connection_sup.erl
@@ -0,0 +1,66 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_connection_sup).
+
+%% Supervisor for a (network) AMQP 0-9-1 client connection.
+%%
+%% Supervises
+%%
+%% * rabbit_reader
+%% * Auxiliary process supervisor
+%%
+%% See also rabbit_reader, rabbit_connection_helper_sup.
+
+-behaviour(supervisor2).
+-behaviour(ranch_protocol).
+
+-export([start_link/4, reader/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(any(), rabbit_net:socket(), module(), any()) ->
+ {'ok', pid(), pid()}.
+
+start_link(Ref, _Sock, _Transport, _Opts) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ %% We need to get channels in the hierarchy here so they get shut
+ %% down after the reader, so the reader gets a chance to terminate
+ %% them cleanly. But for 1.0 readers we can't start the real
+ %% ch_sup_sup (because we don't know if we will be 0-9-1 or 1.0) -
+ %% so we add another supervisor into the hierarchy.
+ %%
+ %% This supervisor also acts as an intermediary for heartbeaters and
+ %% the queue collector process, since these must not be siblings of the
+ %% reader due to the potential for deadlock if they are added/restarted
+ %% whilst the supervision tree is shutting down.
+ {ok, HelperSup} =
+ supervisor2:start_child(
+ SupPid,
+ {helper_sup, {rabbit_connection_helper_sup, start_link, []},
+ intrinsic, infinity, supervisor, [rabbit_connection_helper_sup]}),
+ {ok, ReaderPid} =
+ supervisor2:start_child(
+ SupPid,
+ {reader, {rabbit_reader, start_link, [HelperSup, Ref]},
+ intrinsic, ?WORKER_WAIT, worker, [rabbit_reader]}),
+ {ok, SupPid, ReaderPid}.
+
+-spec reader(pid()) -> pid().
+
+reader(Pid) ->
+ hd(supervisor2:find_child(Pid, reader)).
+
+%%--------------------------------------------------------------------------
+
+init([]) ->
+ ?LG_PROCESS_TYPE(connection_sup),
+ {ok, {{one_for_all, 0, 1}, []}}.
diff --git a/deps/rabbit/src/rabbit_connection_tracking.erl b/deps/rabbit/src/rabbit_connection_tracking.erl
new file mode 100644
index 0000000000..c0704e6a7c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_connection_tracking.erl
@@ -0,0 +1,515 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_connection_tracking).
+
+%% Abstracts away how tracked connection records are stored
+%% and queried.
+%%
+%% See also:
+%%
+%% * rabbit_connection_tracking_handler
+%% * rabbit_reader
+%% * rabbit_event
+-behaviour(rabbit_tracking).
+
+-export([boot/0,
+ update_tracked/1,
+ handle_cast/1,
+ register_tracked/1,
+ unregister_tracked/1,
+ count_tracked_items_in/1,
+ clear_tracking_tables/0,
+ shutdown_tracked_items/2]).
+
+-export([ensure_tracked_connections_table_for_node/1,
+ ensure_per_vhost_tracked_connections_table_for_node/1,
+ ensure_per_user_tracked_connections_table_for_node/1,
+
+ ensure_tracked_connections_table_for_this_node/0,
+ ensure_per_vhost_tracked_connections_table_for_this_node/0,
+ ensure_per_user_tracked_connections_table_for_this_node/0,
+
+ tracked_connection_table_name_for/1,
+ tracked_connection_per_vhost_table_name_for/1,
+ tracked_connection_per_user_table_name_for/1,
+ get_all_tracked_connection_table_names_for_node/1,
+
+ delete_tracked_connections_table_for_node/1,
+ delete_per_vhost_tracked_connections_table_for_node/1,
+ delete_per_user_tracked_connections_table_for_node/1,
+ delete_tracked_connection_user_entry/1,
+ delete_tracked_connection_vhost_entry/1,
+
+ clear_tracked_connection_tables_for_this_node/0,
+
+ list/0, list/1, list_on_node/1, list_on_node/2, list_of_user/1,
+ tracked_connection_from_connection_created/1,
+ tracked_connection_from_connection_state/1,
+ lookup/1,
+ count/0]).
+
+-include_lib("rabbit.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+-export([close_connections/3]).
+
+%%
+%% API
+%%
+
+%% Behaviour callbacks
+
+-spec boot() -> ok.
+
+%% Sets up and resets connection tracking tables for this
+%% node.
+boot() ->
+ ensure_tracked_connections_table_for_this_node(),
+ rabbit_log:info("Setting up a table for connection tracking on this node: ~p",
+ [tracked_connection_table_name_for(node())]),
+ ensure_per_vhost_tracked_connections_table_for_this_node(),
+ rabbit_log:info("Setting up a table for per-vhost connection counting on this node: ~p",
+ [tracked_connection_per_vhost_table_name_for(node())]),
+ ensure_per_user_tracked_connections_table_for_this_node(),
+ rabbit_log:info("Setting up a table for per-user connection counting on this node: ~p",
+ [tracked_connection_per_user_table_name_for(node())]),
+ clear_tracking_tables(),
+ ok.
+
+-spec update_tracked(term()) -> ok.
+
+update_tracked(Event) ->
+ spawn(?MODULE, handle_cast, [Event]),
+ ok.
+
+%% Asynchronously handle update events
+-spec handle_cast(term()) -> ok.
+
+handle_cast({connection_created, Details}) ->
+ ThisNode = node(),
+ case pget(node, Details) of
+ ThisNode ->
+ TConn = tracked_connection_from_connection_created(Details),
+ ConnId = TConn#tracked_connection.id,
+ try
+ register_tracked(TConn)
+ catch
+ error:{no_exists, _} ->
+ Msg = "Could not register connection ~p for tracking, "
+ "its table is not ready yet or the connection terminated prematurely",
+ rabbit_log_connection:warning(Msg, [ConnId]),
+ ok;
+ error:Err ->
+ Msg = "Could not register connection ~p for tracking: ~p",
+ rabbit_log_connection:warning(Msg, [ConnId, Err]),
+ ok
+ end;
+ _OtherNode ->
+ %% ignore
+ ok
+ end;
+handle_cast({connection_closed, Details}) ->
+ ThisNode = node(),
+ case pget(node, Details) of
+ ThisNode ->
+ %% [{name,<<"127.0.0.1:64078 -> 127.0.0.1:5672">>},
+ %% {pid,<0.1774.0>},
+ %% {node, rabbit@hostname}]
+ unregister_tracked(
+ rabbit_tracking:id(ThisNode, pget(name, Details)));
+ _OtherNode ->
+ %% ignore
+ ok
+ end;
+handle_cast({vhost_deleted, Details}) ->
+ VHost = pget(name, Details),
+ %% Schedule vhost entry deletion, allowing time for connections to close
+ _ = timer:apply_after(?TRACKING_EXECUTION_TIMEOUT, ?MODULE,
+ delete_tracked_connection_vhost_entry, [VHost]),
+ rabbit_log_connection:info("Closing all connections in vhost '~s' because it's being deleted", [VHost]),
+ shutdown_tracked_items(
+ rabbit_connection_tracking:list(VHost),
+ rabbit_misc:format("vhost '~s' is deleted", [VHost]));
+%% Note: under normal circumstances this will be called immediately
+%% after the vhost_deleted above. Therefore we should be careful about
+%% what we log and be more defensive.
+handle_cast({vhost_down, Details}) ->
+ VHost = pget(name, Details),
+ Node = pget(node, Details),
+ rabbit_log_connection:info("Closing all connections in vhost '~s' on node '~s'"
+ " because the vhost is stopping",
+ [VHost, Node]),
+ shutdown_tracked_items(
+ rabbit_connection_tracking:list_on_node(Node, VHost),
+ rabbit_misc:format("vhost '~s' is down", [VHost]));
+handle_cast({user_deleted, Details}) ->
+ Username = pget(name, Details),
+ %% Schedule user entry deletion, allowing time for connections to close
+ _ = timer:apply_after(?TRACKING_EXECUTION_TIMEOUT, ?MODULE,
+ delete_tracked_connection_user_entry, [Username]),
+ rabbit_log_connection:info("Closing all connections from user '~s' because it's being deleted", [Username]),
+ shutdown_tracked_items(
+ rabbit_connection_tracking:list_of_user(Username),
+ rabbit_misc:format("user '~s' is deleted", [Username]));
+%% A node had been deleted from the cluster.
+handle_cast({node_deleted, Details}) ->
+ Node = pget(node, Details),
+ rabbit_log_connection:info("Node '~s' was removed from the cluster, deleting its connection tracking tables...", [Node]),
+ delete_tracked_connections_table_for_node(Node),
+ delete_per_vhost_tracked_connections_table_for_node(Node),
+ delete_per_user_tracked_connections_table_for_node(Node).
+
+-spec register_tracked(rabbit_types:tracked_connection()) -> ok.
+-dialyzer([{nowarn_function, [register_tracked/1]}, race_conditions]).
+
+register_tracked(#tracked_connection{username = Username, vhost = VHost, id = ConnId, node = Node} = Conn) when Node =:= node() ->
+ TableName = tracked_connection_table_name_for(Node),
+ PerVhostTableName = tracked_connection_per_vhost_table_name_for(Node),
+ PerUserConnTableName = tracked_connection_per_user_table_name_for(Node),
+ %% upsert
+ case mnesia:dirty_read(TableName, ConnId) of
+ [] ->
+ mnesia:dirty_write(TableName, Conn),
+ mnesia:dirty_update_counter(PerVhostTableName, VHost, 1),
+ mnesia:dirty_update_counter(PerUserConnTableName, Username, 1);
+ [#tracked_connection{}] ->
+ ok
+ end,
+ ok.
+
+-spec unregister_tracked(rabbit_types:tracked_connection_id()) -> ok.
+
+unregister_tracked(ConnId = {Node, _Name}) when Node =:= node() ->
+ TableName = tracked_connection_table_name_for(Node),
+ PerVhostTableName = tracked_connection_per_vhost_table_name_for(Node),
+ PerUserConnTableName = tracked_connection_per_user_table_name_for(Node),
+ case mnesia:dirty_read(TableName, ConnId) of
+ [] -> ok;
+ [#tracked_connection{vhost = VHost, username = Username}] ->
+ mnesia:dirty_update_counter(PerUserConnTableName, Username, -1),
+ mnesia:dirty_update_counter(PerVhostTableName, VHost, -1),
+ mnesia:dirty_delete(TableName, ConnId)
+ end.
+
+-spec count_tracked_items_in({atom(), rabbit_types:vhost()}) -> non_neg_integer().
+
+count_tracked_items_in({vhost, VirtualHost}) ->
+ rabbit_tracking:count_tracked_items(
+ fun tracked_connection_per_vhost_table_name_for/1,
+ #tracked_connection_per_vhost.connection_count, VirtualHost,
+ "connections in vhost");
+count_tracked_items_in({user, Username}) ->
+ rabbit_tracking:count_tracked_items(
+ fun tracked_connection_per_user_table_name_for/1,
+ #tracked_connection_per_user.connection_count, Username,
+ "connections for user").
+
+-spec clear_tracking_tables() -> ok.
+
+clear_tracking_tables() ->
+ clear_tracked_connection_tables_for_this_node().
+
+-spec shutdown_tracked_items(list(), term()) -> ok.
+
+shutdown_tracked_items(TrackedItems, Message) ->
+ close_connections(TrackedItems, Message).
+
+%% Extended API
+
+-spec ensure_tracked_connections_table_for_this_node() -> ok.
+
+ensure_tracked_connections_table_for_this_node() ->
+ ensure_tracked_connections_table_for_node(node()).
+
+
+-spec ensure_per_vhost_tracked_connections_table_for_this_node() -> ok.
+
+ensure_per_vhost_tracked_connections_table_for_this_node() ->
+ ensure_per_vhost_tracked_connections_table_for_node(node()).
+
+
+-spec ensure_per_user_tracked_connections_table_for_this_node() -> ok.
+
+ensure_per_user_tracked_connections_table_for_this_node() ->
+ ensure_per_user_tracked_connections_table_for_node(node()).
+
+
+%% Create tables
+-spec ensure_tracked_connections_table_for_node(node()) -> ok.
+
+ensure_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_table_name_for(Node),
+ case mnesia:create_table(TableName, [{record_name, tracked_connection},
+ {attributes, record_info(fields, tracked_connection)}]) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to create a tracked connection table for node ~p: ~p", [Node, Error]),
+ ok
+ end.
+
+-spec ensure_per_vhost_tracked_connections_table_for_node(node()) -> ok.
+
+ensure_per_vhost_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_per_vhost_table_name_for(Node),
+ case mnesia:create_table(TableName, [{record_name, tracked_connection_per_vhost},
+ {attributes, record_info(fields, tracked_connection_per_vhost)}]) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to create a per-vhost tracked connection table for node ~p: ~p", [Node, Error]),
+ ok
+ end.
+
+-spec ensure_per_user_tracked_connections_table_for_node(node()) -> ok.
+
+ensure_per_user_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_per_user_table_name_for(Node),
+ case mnesia:create_table(TableName, [{record_name, tracked_connection_per_user},
+ {attributes, record_info(fields, tracked_connection_per_user)}]) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to create a per-user tracked connection table for node ~p: ~p", [Node, Error]),
+ ok
+ end.
+
+-spec clear_tracked_connection_tables_for_this_node() -> ok.
+
+clear_tracked_connection_tables_for_this_node() ->
+ [rabbit_tracking:clear_tracking_table(T)
+ || T <- get_all_tracked_connection_table_names_for_node(node())],
+ ok.
+
+-spec delete_tracked_connections_table_for_node(node()) -> ok.
+
+delete_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_table_name_for(Node),
+ rabbit_tracking:delete_tracking_table(TableName, Node, "tracked connection").
+
+-spec delete_per_vhost_tracked_connections_table_for_node(node()) -> ok.
+
+delete_per_vhost_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_per_vhost_table_name_for(Node),
+ rabbit_tracking:delete_tracking_table(TableName, Node,
+ "per-vhost tracked connection").
+
+-spec delete_per_user_tracked_connections_table_for_node(node()) -> ok.
+
+delete_per_user_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_per_user_table_name_for(Node),
+ rabbit_tracking:delete_tracking_table(TableName, Node,
+ "per-user tracked connection").
+
+-spec tracked_connection_table_name_for(node()) -> atom().
+
+tracked_connection_table_name_for(Node) ->
+ list_to_atom(rabbit_misc:format("tracked_connection_on_node_~s", [Node])).
+
+-spec tracked_connection_per_vhost_table_name_for(node()) -> atom().
+
+tracked_connection_per_vhost_table_name_for(Node) ->
+ list_to_atom(rabbit_misc:format("tracked_connection_per_vhost_on_node_~s", [Node])).
+
+-spec tracked_connection_per_user_table_name_for(node()) -> atom().
+
+tracked_connection_per_user_table_name_for(Node) ->
+ list_to_atom(rabbit_misc:format(
+ "tracked_connection_table_per_user_on_node_~s", [Node])).
+
+-spec get_all_tracked_connection_table_names_for_node(node()) -> [atom()].
+
+get_all_tracked_connection_table_names_for_node(Node) ->
+ [tracked_connection_table_name_for(Node),
+ tracked_connection_per_vhost_table_name_for(Node),
+ tracked_connection_per_user_table_name_for(Node)].
+
+-spec lookup(rabbit_types:connection_name()) -> rabbit_types:tracked_connection() | 'not_found'.
+
+lookup(Name) ->
+ Nodes = rabbit_nodes:all_running(),
+ lookup(Name, Nodes).
+
+lookup(_, []) ->
+ not_found;
+lookup(Name, [Node | Nodes]) ->
+ TableName = tracked_connection_table_name_for(Node),
+ case mnesia:dirty_read(TableName, {Node, Name}) of
+ [] -> lookup(Name, Nodes);
+ [Row] -> Row
+ end.
+
+-spec list() -> [rabbit_types:tracked_connection()].
+
+list() ->
+ lists:foldl(
+ fun (Node, Acc) ->
+ Tab = tracked_connection_table_name_for(Node),
+ Acc ++ mnesia:dirty_match_object(Tab, #tracked_connection{_ = '_'})
+ end, [], rabbit_nodes:all_running()).
+
+-spec count() -> non_neg_integer().
+
+count() ->
+ lists:foldl(
+ fun (Node, Acc) ->
+ Tab = tracked_connection_table_name_for(Node),
+ Acc + mnesia:table_info(Tab, size)
+ end, 0, rabbit_nodes:all_running()).
+
+-spec list(rabbit_types:vhost()) -> [rabbit_types:tracked_connection()].
+
+list(VHost) ->
+ rabbit_tracking:match_tracked_items(
+ fun tracked_connection_table_name_for/1,
+ #tracked_connection{vhost = VHost, _ = '_'}).
+
+-spec list_on_node(node()) -> [rabbit_types:tracked_connection()].
+
+list_on_node(Node) ->
+ try mnesia:dirty_match_object(
+ tracked_connection_table_name_for(Node),
+ #tracked_connection{_ = '_'})
+ catch exit:{aborted, {no_exists, _}} -> []
+ end.
+
+-spec list_on_node(node(), rabbit_types:vhost()) -> [rabbit_types:tracked_connection()].
+
+list_on_node(Node, VHost) ->
+ try mnesia:dirty_match_object(
+ tracked_connection_table_name_for(Node),
+ #tracked_connection{vhost = VHost, _ = '_'})
+ catch exit:{aborted, {no_exists, _}} -> []
+ end.
+
+
+-spec list_of_user(rabbit_types:username()) -> [rabbit_types:tracked_connection()].
+
+list_of_user(Username) ->
+ rabbit_tracking:match_tracked_items(
+ fun tracked_connection_table_name_for/1,
+ #tracked_connection{username = Username, _ = '_'}).
+
+%% Internal, delete tracked entries
+
+delete_tracked_connection_vhost_entry(Vhost) ->
+ rabbit_tracking:delete_tracked_entry(
+ {rabbit_vhost, exists, [Vhost]},
+ fun tracked_connection_per_vhost_table_name_for/1,
+ Vhost).
+
+delete_tracked_connection_user_entry(Username) ->
+ rabbit_tracking:delete_tracked_entry(
+ {rabbit_auth_backend_internal, exists, [Username]},
+ fun tracked_connection_per_user_table_name_for/1,
+ Username).
+
+%% Returns a #tracked_connection from connection_created
+%% event details.
+%%
+%% @see rabbit_connection_tracking_handler.
+tracked_connection_from_connection_created(EventDetails) ->
+ %% Example event:
+ %%
+ %% [{type,network},
+ %% {pid,<0.329.0>},
+ %% {name,<<"127.0.0.1:60998 -> 127.0.0.1:5672">>},
+ %% {port,5672},
+ %% {peer_port,60998},
+ %% {host,{0,0,0,0,0,65535,32512,1}},
+ %% {peer_host,{0,0,0,0,0,65535,32512,1}},
+ %% {ssl,false},
+ %% {peer_cert_subject,''},
+ %% {peer_cert_issuer,''},
+ %% {peer_cert_validity,''},
+ %% {auth_mechanism,<<"PLAIN">>},
+ %% {ssl_protocol,''},
+ %% {ssl_key_exchange,''},
+ %% {ssl_cipher,''},
+ %% {ssl_hash,''},
+ %% {protocol,{0,9,1}},
+ %% {user,<<"guest">>},
+ %% {vhost,<<"/">>},
+ %% {timeout,14},
+ %% {frame_max,131072},
+ %% {channel_max,65535},
+ %% {client_properties,
+ %% [{<<"capabilities">>,table,
+ %% [{<<"publisher_confirms">>,bool,true},
+ %% {<<"consumer_cancel_notify">>,bool,true},
+ %% {<<"exchange_exchange_bindings">>,bool,true},
+ %% {<<"basic.nack">>,bool,true},
+ %% {<<"connection.blocked">>,bool,true},
+ %% {<<"authentication_failure_close">>,bool,true}]},
+ %% {<<"product">>,longstr,<<"Bunny">>},
+ %% {<<"platform">>,longstr,
+ %% <<"ruby 2.3.0p0 (2015-12-25 revision 53290) [x86_64-darwin15]">>},
+ %% {<<"version">>,longstr,<<"2.3.0.pre">>},
+ %% {<<"information">>,longstr,
+ %% <<"http://rubybunny.info">>}]},
+ %% {connected_at,1453214290847}]
+ Name = pget(name, EventDetails),
+ Node = pget(node, EventDetails),
+ #tracked_connection{id = rabbit_tracking:id(Node, Name),
+ name = Name,
+ node = Node,
+ vhost = pget(vhost, EventDetails),
+ username = pget(user, EventDetails),
+ connected_at = pget(connected_at, EventDetails),
+ pid = pget(pid, EventDetails),
+ type = pget(type, EventDetails),
+ peer_host = pget(peer_host, EventDetails),
+ peer_port = pget(peer_port, EventDetails)}.
+
+tracked_connection_from_connection_state(#connection{
+ vhost = VHost,
+ connected_at = Ts,
+ peer_host = PeerHost,
+ peer_port = PeerPort,
+ user = Username,
+ name = Name
+ }) ->
+ tracked_connection_from_connection_created(
+ [{name, Name},
+ {node, node()},
+ {vhost, VHost},
+ {user, Username},
+ {user_who_performed_action, Username},
+ {connected_at, Ts},
+ {pid, self()},
+ {type, network},
+ {peer_port, PeerPort},
+ {peer_host, PeerHost}]).
+
+close_connections(Tracked, Message) ->
+ close_connections(Tracked, Message, 0).
+
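+%% Closes every tracked connection, optionally sleeping Delay milliseconds
+%% between closures so that mass disconnects can be staggered.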
+close_connections(Tracked, Message, Delay) ->
+ [begin
+ close_connection(Conn, Message),
+ timer:sleep(Delay)
+ end || Conn <- Tracked],
+ ok.
+
+close_connection(#tracked_connection{pid = Pid, type = network}, Message) ->
+ try
+ rabbit_networking:close_connection(Pid, Message)
+ catch error:{not_a_connection, _} ->
+            %% could have been closed concurrently, or the input
+ %% is bogus. In any case, we should not terminate
+ ok;
+ _:Err ->
+ %% ignore, don't terminate
+ rabbit_log:warning("Could not close connection ~p: ~p", [Pid, Err]),
+ ok
+ end;
+close_connection(#tracked_connection{pid = Pid, type = direct}, Message) ->
+ %% Do an RPC call to the node running the direct client.
+ Node = node(Pid),
+ rpc:call(Node, amqp_direct_connection, server_close, [Pid, 320, Message]).
diff --git a/deps/rabbit/src/rabbit_connection_tracking_handler.erl b/deps/rabbit/src/rabbit_connection_tracking_handler.erl
new file mode 100644
index 0000000000..17085d805a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_connection_tracking_handler.erl
@@ -0,0 +1,80 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_connection_tracking_handler).
+
+%% This module keeps track of connection creation and termination events
+%% on its local node. The primary goal here is to decouple connection
+%% tracking from rabbit_reader in rabbit_common.
+%%
+%% Events from other nodes are ignored.
+
+-behaviour(gen_event).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% for compatibility with previous versions of CLI tools
+-export([close_connections/3]).
+
+-include_lib("rabbit.hrl").
+
+-rabbit_boot_step({?MODULE,
+ [{description, "connection tracking event handler"},
+ {mfa, {gen_event, add_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {cleanup, {gen_event, delete_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {requires, [connection_tracking]},
+ {enables, recovery}]}).
+
+%%
+%% API
+%%
+
+init([]) ->
+ {ok, []}.
+
+handle_event(#event{type = connection_created, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({connection_created, Details}),
+ {ok, State};
+handle_event(#event{type = connection_closed, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({connection_closed, Details}),
+ {ok, State};
+handle_event(#event{type = vhost_deleted, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({vhost_deleted, Details}),
+ {ok, State};
+%% Note: under normal circumstances this will be called immediately
+%% after the vhost_deleted above. Therefore we should be careful about
+%% what we log and be more defensive.
+handle_event(#event{type = vhost_down, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({vhost_down, Details}),
+ {ok, State};
+handle_event(#event{type = user_deleted, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({user_deleted, Details}),
+ {ok, State};
+%% A node had been deleted from the cluster.
+handle_event(#event{type = node_deleted, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({node_deleted, Details}),
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+close_connections(Tracked, Message, Delay) ->
+ rabbit_connection_tracking:close_connections(Tracked, Message, Delay).
diff --git a/deps/rabbit/src/rabbit_control_pbe.erl b/deps/rabbit/src/rabbit_control_pbe.erl
new file mode 100644
index 0000000000..95c4fe41f1
--- /dev/null
+++ b/deps/rabbit/src/rabbit_control_pbe.erl
@@ -0,0 +1,82 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_control_pbe).
+
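+%% Encrypts and decrypts configuration values using password-based
+%% encryption (rabbit_pbe); these helpers back the CLI's encode/decode
+%% commands.
+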
+-export([decode/4, encode/4, list_ciphers/0, list_hashes/0]).
+
+% for testing purposes
+-export([evaluate_input_as_term/1]).
+
+list_ciphers() ->
+ {ok, io_lib:format("~p", [rabbit_pbe:supported_ciphers()])}.
+
+list_hashes() ->
+ {ok, io_lib:format("~p", [rabbit_pbe:supported_hashes()])}.
+
+validate(_Cipher, _Hash, Iterations, _Args) when Iterations =< 0 ->
+    {error, io_lib:format("The requested number of iterations must be a positive integer", [])};
+validate(_Cipher, _Hash, _Iterations, Args) when length(Args) < 2 ->
+ {error, io_lib:format("Please provide a value to encode/decode and a passphrase", [])};
+validate(_Cipher, _Hash, _Iterations, Args) when length(Args) > 2 ->
+ {error, io_lib:format("Too many arguments. Please provide a value to encode/decode and a passphrase", [])};
+validate(Cipher, Hash, _Iterations, _Args) ->
+ case lists:member(Cipher, rabbit_pbe:supported_ciphers()) of
+ false ->
+ {error, io_lib:format("The requested cipher is not supported", [])};
+ true ->
+ case lists:member(Hash, rabbit_pbe:supported_hashes()) of
+ false ->
+ {error, io_lib:format("The requested hash is not supported", [])};
+ true -> ok
+ end
+ end.
+
+encode(Cipher, Hash, Iterations, Args) ->
+ case validate(Cipher, Hash, Iterations, Args) of
+ {error, Err} -> {error, Err};
+ ok ->
+ [Value, PassPhrase] = Args,
+ try begin
+ TermValue = evaluate_input_as_term(Value),
+ Result = {encrypted, _} = rabbit_pbe:encrypt_term(Cipher, Hash, Iterations,
+ list_to_binary(PassPhrase), TermValue),
+ {ok, io_lib:format("~p", [Result])}
+ end
+ catch
+ _:Msg -> {error, io_lib:format("Error during cipher operation: ~p", [Msg])}
+ end
+ end.
+
+decode(Cipher, Hash, Iterations, Args) ->
+ case validate(Cipher, Hash, Iterations, Args) of
+ {error, Err} -> {error, Err};
+ ok ->
+ [Value, PassPhrase] = Args,
+ try begin
+ TermValue = evaluate_input_as_term(Value),
+ TermToDecrypt = case TermValue of
+ {encrypted, _}=EncryptedTerm ->
+ EncryptedTerm;
+ _ ->
+ {encrypted, TermValue}
+ end,
+ Result = rabbit_pbe:decrypt_term(Cipher, Hash, Iterations,
+ list_to_binary(PassPhrase),
+ TermToDecrypt),
+ {ok, io_lib:format("~p", [Result])}
+ end
+ catch
+ _:Msg -> {error, io_lib:format("Error during cipher operation: ~p", [Msg])}
+ end
+ end.
+
+evaluate_input_as_term(Input) ->
+ {ok,Tokens,_EndLine} = erl_scan:string(Input ++ "."),
+ {ok,AbsForm} = erl_parse:parse_exprs(Tokens),
+ {value,TermValue,_Bs} = erl_eval:exprs(AbsForm, erl_eval:new_bindings()),
+ TermValue.
diff --git a/deps/rabbit/src/rabbit_core_ff.erl b/deps/rabbit/src/rabbit_core_ff.erl
new file mode 100644
index 0000000000..6d30846775
--- /dev/null
+++ b/deps/rabbit/src/rabbit_core_ff.erl
@@ -0,0 +1,179 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_core_ff).
+
+-export([quorum_queue_migration/3,
+ stream_queue_migration/3,
+ implicit_default_bindings_migration/3,
+ virtual_host_metadata_migration/3,
+ maintenance_mode_status_migration/3,
+ user_limits_migration/3]).
+
+-rabbit_feature_flag(
+ {quorum_queue,
+ #{desc => "Support queues of type `quorum`",
+ doc_url => "https://www.rabbitmq.com/quorum-queues.html",
+ stability => stable,
+ migration_fun => {?MODULE, quorum_queue_migration}
+ }}).
+
+-rabbit_feature_flag(
+ {stream_queue,
+ #{desc => "Support queues of type `stream`",
+ doc_url => "https://www.rabbitmq.com/stream-queues.html",
+ stability => stable,
+ depends_on => [quorum_queue],
+ migration_fun => {?MODULE, stream_queue_migration}
+ }}).
+
+-rabbit_feature_flag(
+ {implicit_default_bindings,
+ #{desc => "Default bindings are now implicit, instead of "
+ "being stored in the database",
+ stability => stable,
+ migration_fun => {?MODULE, implicit_default_bindings_migration}
+ }}).
+
+-rabbit_feature_flag(
+ {virtual_host_metadata,
+ #{desc => "Virtual host metadata (description, tags, etc)",
+ stability => stable,
+ migration_fun => {?MODULE, virtual_host_metadata_migration}
+ }}).
+
+-rabbit_feature_flag(
+ {maintenance_mode_status,
+ #{desc => "Maintenance mode status",
+ stability => stable,
+ migration_fun => {?MODULE, maintenance_mode_status_migration}
+ }}).
+
+-rabbit_feature_flag(
+ {user_limits,
+ #{desc => "Configure connection and channel limits for a user",
+ stability => stable,
+ migration_fun => {?MODULE, user_limits_migration}
+ }}).
+
+%% -------------------------------------------------------------------
+%% Quorum queues.
+%% -------------------------------------------------------------------
+
+-define(quorum_queue_tables, [rabbit_queue,
+ rabbit_durable_queue]).
+
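+%% Migration functions are called with `enable` when a feature flag is being
+%% turned on and with `is_enabled` when the flag state is being checked; here
+%% they return ok or {error, Reason} for `enable`, and a boolean (or
+%% `undefined` when unknown) for `is_enabled`.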
+quorum_queue_migration(FeatureName, _FeatureProps, enable) ->
+ Tables = ?quorum_queue_tables,
+ rabbit_table:wait(Tables, _Retry = true),
+ Fields = amqqueue:fields(amqqueue_v2),
+ migrate_to_amqqueue_with_type(FeatureName, Tables, Fields);
+quorum_queue_migration(_FeatureName, _FeatureProps, is_enabled) ->
+ Tables = ?quorum_queue_tables,
+ rabbit_table:wait(Tables, _Retry = true),
+ Fields = amqqueue:fields(amqqueue_v2),
+ mnesia:table_info(rabbit_queue, attributes) =:= Fields andalso
+ mnesia:table_info(rabbit_durable_queue, attributes) =:= Fields.
+
+stream_queue_migration(_FeatureName, _FeatureProps, _Enable) ->
+ ok.
+
+migrate_to_amqqueue_with_type(FeatureName, [Table | Rest], Fields) ->
+ rabbit_log_feature_flags:info(
+ "Feature flag `~s`: migrating Mnesia table ~s...",
+ [FeatureName, Table]),
+ Fun = fun(Queue) -> amqqueue:upgrade_to(amqqueue_v2, Queue) end,
+ case mnesia:transform_table(Table, Fun, Fields) of
+ {atomic, ok} -> migrate_to_amqqueue_with_type(FeatureName,
+ Rest,
+ Fields);
+ {aborted, Reason} -> {error, Reason}
+ end;
+migrate_to_amqqueue_with_type(FeatureName, [], _) ->
+ rabbit_log_feature_flags:info(
+ "Feature flag `~s`: Mnesia tables migration done",
+ [FeatureName]),
+ ok.
+
+%% -------------------------------------------------------------------
+%% Default bindings.
+%% -------------------------------------------------------------------
+
+implicit_default_bindings_migration(FeatureName, _FeatureProps,
+ enable) ->
+ %% Default exchange bindings are now implicit (not stored in the
+ %% route tables). It should be safe to remove them outside of a
+ %% transaction.
+ rabbit_table:wait([rabbit_queue]),
+ Queues = mnesia:dirty_all_keys(rabbit_queue),
+ remove_explicit_default_bindings(FeatureName, Queues);
+implicit_default_bindings_migration(_Feature_Name, _FeatureProps,
+ is_enabled) ->
+ undefined.
+
+remove_explicit_default_bindings(_FeatureName, []) ->
+ ok;
+remove_explicit_default_bindings(FeatureName, Queues) ->
+ rabbit_log_feature_flags:info(
+ "Feature flag `~s`: deleting explicit default bindings "
+ "for ~b queues (it may take some time)...",
+ [FeatureName, length(Queues)]),
+ [rabbit_binding:remove_default_exchange_binding_rows_of(Q)
+ || Q <- Queues],
+ ok.
+
+%% -------------------------------------------------------------------
+%% Virtual host metadata.
+%% -------------------------------------------------------------------
+
+virtual_host_metadata_migration(_FeatureName, _FeatureProps, enable) ->
+ Tab = rabbit_vhost,
+ rabbit_table:wait([Tab], _Retry = true),
+ Fun = fun(Row) -> vhost:upgrade_to(vhost_v2, Row) end,
+ case mnesia:transform_table(Tab, Fun, vhost:fields(vhost_v2)) of
+ {atomic, ok} -> ok;
+ {aborted, Reason} -> {error, Reason}
+ end;
+virtual_host_metadata_migration(_FeatureName, _FeatureProps, is_enabled) ->
+ mnesia:table_info(rabbit_vhost, attributes) =:= vhost:fields(vhost_v2).
+
+%% -------------------------------------------------------------------
+%% Maintenance mode.
+%% -------------------------------------------------------------------
+
+maintenance_mode_status_migration(FeatureName, _FeatureProps, enable) ->
+ TableName = rabbit_maintenance:status_table_name(),
+ rabbit_log:info(
+ "Creating table ~s for feature flag `~s`",
+ [TableName, FeatureName]),
+ try
+ _ = rabbit_table:create(
+ TableName,
+ rabbit_maintenance:status_table_definition()),
+ _ = rabbit_table:ensure_table_copy(TableName, node())
+ catch throw:Reason ->
+ rabbit_log:error(
+ "Failed to create maintenance status table: ~p",
+ [Reason])
+ end;
+maintenance_mode_status_migration(_FeatureName, _FeatureProps, is_enabled) ->
+ rabbit_table:exists(rabbit_maintenance:status_table_name()).
+
+%% -------------------------------------------------------------------
+%% User limits.
+%% -------------------------------------------------------------------
+
+user_limits_migration(_FeatureName, _FeatureProps, enable) ->
+ Tab = rabbit_user,
+ rabbit_table:wait([Tab], _Retry = true),
+ Fun = fun(Row) -> internal_user:upgrade_to(internal_user_v2, Row) end,
+ case mnesia:transform_table(Tab, Fun, internal_user:fields(internal_user_v2)) of
+ {atomic, ok} -> ok;
+ {aborted, Reason} -> {error, Reason}
+ end;
+user_limits_migration(_FeatureName, _FeatureProps, is_enabled) ->
+ mnesia:table_info(rabbit_user, attributes) =:= internal_user:fields(internal_user_v2).
diff --git a/deps/rabbit/src/rabbit_core_metrics_gc.erl b/deps/rabbit/src/rabbit_core_metrics_gc.erl
new file mode 100644
index 0000000000..890c127586
--- /dev/null
+++ b/deps/rabbit/src/rabbit_core_metrics_gc.erl
@@ -0,0 +1,199 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_core_metrics_gc).
+
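+%% Periodically sweeps the core metrics ETS tables and deletes rows that
+%% refer to processes, queues, exchanges or nodes that no longer exist.
+%% The sweep interval defaults to 120s (core_metrics_gc_interval).
+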
+-record(state, {timer,
+ interval
+ }).
+
+-export([start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init(_) ->
+ Interval = rabbit_misc:get_env(rabbit, core_metrics_gc_interval, 120000),
+ {ok, start_timer(#state{interval = Interval})}.
+
+handle_call(test, _From, State) ->
+ {reply, ok, State}.
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(start_gc, State) ->
+ gc_connections(),
+ gc_channels(),
+ gc_queues(),
+ gc_exchanges(),
+ gc_nodes(),
+ gc_gen_server2(),
+ gc_auth_attempts(),
+ {noreply, start_timer(State)}.
+
+terminate(_Reason, #state{timer = TRef}) ->
+ erlang:cancel_timer(TRef),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+start_timer(#state{interval = Interval} = St) ->
+ TRef = erlang:send_after(Interval, self(), start_gc),
+ St#state{timer = TRef}.
+
+gc_connections() ->
+ gc_process(connection_created),
+ gc_process(connection_metrics),
+ gc_process(connection_coarse_metrics).
+
+gc_channels() ->
+ gc_process(channel_created),
+ gc_process(channel_metrics),
+ gc_process(channel_process_metrics),
+ ok.
+
+gc_queues() ->
+ gc_local_queues(),
+ gc_global_queues().
+
+gc_local_queues() ->
+ Queues = rabbit_amqqueue:list_local_names(),
+ QueuesDown = rabbit_amqqueue:list_local_names_down(),
+ GbSet = gb_sets:from_list(Queues),
+ GbSetDown = gb_sets:from_list(QueuesDown),
+ gc_queue_metrics(GbSet, GbSetDown),
+ gc_entity(queue_coarse_metrics, GbSet),
+ Followers = gb_sets:from_list([amqqueue:get_name(Q) || Q <- rabbit_amqqueue:list_local_followers() ]),
+ gc_leader_data(Followers).
+
+gc_leader_data(Followers) ->
+ ets:foldl(fun({Id, _, _, _, _}, none) ->
+ gc_leader_data(Id, queue_coarse_metrics, Followers)
+ end, none, queue_coarse_metrics).
+
+gc_leader_data(Id, Table, GbSet) ->
+ case gb_sets:is_member(Id, GbSet) of
+ true ->
+ ets:delete(Table, Id),
+ none;
+ false ->
+ none
+ end.
+
+gc_global_queues() ->
+ GbSet = gb_sets:from_list(rabbit_amqqueue:list_names()),
+ gc_process_and_entity(channel_queue_metrics, GbSet),
+ gc_process_and_entity(consumer_created, GbSet),
+ ExchangeGbSet = gb_sets:from_list(rabbit_exchange:list_names()),
+ gc_process_and_entities(channel_queue_exchange_metrics, GbSet, ExchangeGbSet).
+
+gc_exchanges() ->
+ Exchanges = rabbit_exchange:list_names(),
+ GbSet = gb_sets:from_list(Exchanges),
+ gc_process_and_entity(channel_exchange_metrics, GbSet).
+
+gc_nodes() ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ GbSet = gb_sets:from_list(Nodes),
+ gc_entity(node_node_metrics, GbSet).
+
+gc_gen_server2() ->
+ gc_process(gen_server2_metrics).
+
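+%% Folds over a metrics table and drops rows whose owning process is no
+%% longer alive; the clauses match the differing row arities of the tables
+%% involved.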
+gc_process(Table) ->
+ ets:foldl(fun({Pid = Key, _}, none) ->
+ gc_process(Pid, Table, Key);
+ ({Pid = Key, _, _, _, _}, none) ->
+ gc_process(Pid, Table, Key);
+ ({Pid = Key, _, _, _}, none) ->
+ gc_process(Pid, Table, Key)
+ end, none, Table).
+
+gc_process(Pid, Table, Key) ->
+ case rabbit_misc:is_process_alive(Pid) of
+ true ->
+ none;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end.
+
+gc_queue_metrics(GbSet, GbSetDown) ->
+ Table = queue_metrics,
+ ets:foldl(fun({Key, Props, Marker}, none) ->
+ case gb_sets:is_member(Key, GbSet) of
+ true ->
+ case gb_sets:is_member(Key, GbSetDown) of
+ true ->
+ ets:insert(Table, {Key, [{state, down} | lists:keydelete(state, 1, Props)], Marker}),
+ none;
+ false ->
+ none
+ end;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end
+ end, none, Table).
+
+gc_entity(Table, GbSet) ->
+ ets:foldl(fun({{_, Id} = Key, _}, none) ->
+ gc_entity(Id, Table, Key, GbSet);
+ ({Id = Key, _}, none) ->
+ gc_entity(Id, Table, Key, GbSet);
+ ({Id = Key, _, _}, none) ->
+ gc_entity(Id, Table, Key, GbSet);
+ ({Id = Key, _, _, _, _}, none) ->
+ gc_entity(Id, Table, Key, GbSet)
+ end, none, Table).
+
+gc_entity(Id, Table, Key, GbSet) ->
+ case gb_sets:is_member(Id, GbSet) of
+ true ->
+ none;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end.
+
+gc_process_and_entity(Table, GbSet) ->
+ ets:foldl(fun({{Pid, Id} = Key, _, _, _, _, _, _, _, _}, none)
+ when Table == channel_queue_metrics ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet);
+ ({{Pid, Id} = Key, _, _, _, _, _}, none)
+ when Table == channel_exchange_metrics ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet);
+ ({{Id, Pid, _} = Key, _, _, _, _, _, _}, none)
+ when Table == consumer_created ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet);
+ ({{{Pid, Id}, _} = Key, _, _, _, _}, none) ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet)
+ end, none, Table).
+
+gc_process_and_entity(Id, Pid, Table, Key, GbSet) ->
+ case rabbit_misc:is_process_alive(Pid) andalso gb_sets:is_member(Id, GbSet) of
+ true ->
+ none;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end.
+
+gc_process_and_entities(Table, QueueGbSet, ExchangeGbSet) ->
+ ets:foldl(fun({{Pid, {Q, X}} = Key, _, _}, none) ->
+ gc_process(Pid, Table, Key),
+ gc_entity(Q, Table, Key, QueueGbSet),
+ gc_entity(X, Table, Key, ExchangeGbSet)
+ end, none, Table).
+
+gc_auth_attempts() ->
+ ets:delete_all_objects(auth_attempt_detailed_metrics).
diff --git a/deps/rabbit/src/rabbit_credential_validation.erl b/deps/rabbit/src/rabbit_credential_validation.erl
new file mode 100644
index 0000000000..8712628ade
--- /dev/null
+++ b/deps/rabbit/src/rabbit_credential_validation.erl
@@ -0,0 +1,44 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_credential_validation).
+
+-include("rabbit.hrl").
+
+%% used for backwards compatibility
+-define(DEFAULT_BACKEND, rabbit_credential_validator_accept_everything).
+
+%%
+%% API
+%%
+
+-export([validate/2, backend/0]).
+
+%% Validates a username/password pair by delegating to the effective
+%% `rabbit_credential_validator`. Used by `rabbit_auth_backend_internal`.
+%% Note that some validators may choose to only validate passwords.
+%%
+%% Possible return values:
+%%
+%% * ok: provided credentials passed validation.
+%% * {error, Error}: provided credentials failed validation.
+
+-spec validate(rabbit_types:username(), rabbit_types:password()) -> 'ok' | {'error', string()}.
+
+validate(Username, Password) ->
+ Backend = backend(),
+ Backend:validate(Username, Password).
+
+-spec backend() -> atom().
+
+backend() ->
+ case application:get_env(rabbit, credential_validator) of
+ undefined ->
+ ?DEFAULT_BACKEND;
+ {ok, Proplist} ->
+ proplists:get_value(validation_backend, Proplist, ?DEFAULT_BACKEND)
+ end.
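+
+%% Illustrative only: an advanced.config sketch that selects a non-default
+%% validator through the same keys consulted by backend/0 above
+%% (rabbit.credential_validator and its validation_backend entry).
+%%
+%%   [{rabbit,
+%%     [{credential_validator,
+%%       [{validation_backend, rabbit_credential_validator_min_password_length}]}]}].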
diff --git a/deps/rabbit/src/rabbit_credential_validator.erl b/deps/rabbit/src/rabbit_credential_validator.erl
new file mode 100644
index 0000000000..3b5d0752bf
--- /dev/null
+++ b/deps/rabbit/src/rabbit_credential_validator.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_credential_validator).
+
+-include("rabbit.hrl").
+
+%% Validates a password. Used by `rabbit_auth_backend_internal`.
+%%
+%% Possible return values:
+%%
+%% * ok: provided password passed validation.
+%% * {error, Error}: provided password failed validation.
+
+-callback validate(rabbit_types:username(), rabbit_types:password()) -> 'ok' | {'error', string()}.
diff --git a/deps/rabbit/src/rabbit_credential_validator_accept_everything.erl b/deps/rabbit/src/rabbit_credential_validator_accept_everything.erl
new file mode 100644
index 0000000000..fea10fd4b6
--- /dev/null
+++ b/deps/rabbit/src/rabbit_credential_validator_accept_everything.erl
@@ -0,0 +1,23 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_credential_validator_accept_everything).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_credential_validator).
+
+%%
+%% API
+%%
+
+-export([validate/2]).
+
+-spec validate(rabbit_types:username(), rabbit_types:password()) -> 'ok' | {'error', string()}.
+
+validate(_Username, _Password) ->
+ ok.
diff --git a/deps/rabbit/src/rabbit_credential_validator_min_password_length.erl b/deps/rabbit/src/rabbit_credential_validator_min_password_length.erl
new file mode 100644
index 0000000000..463090127f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_credential_validator_min_password_length.erl
@@ -0,0 +1,50 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_credential_validator_min_password_length).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_credential_validator).
+
+%% accommodates default (localhost-only) user credentials,
+%% guest/guest
+-define(DEFAULT_MIN_LENGTH, 5).
+
+%%
+%% API
+%%
+
+-export([validate/2]).
+%% for tests
+-export([validate/3]).
+
+-spec validate(rabbit_types:username(), rabbit_types:password()) -> 'ok' | {'error', string()}.
+
+validate(Username, Password) ->
+ MinLength = case application:get_env(rabbit, credential_validator) of
+ undefined ->
+ ?DEFAULT_MIN_LENGTH;
+ {ok, Proplist} ->
+ case proplists:get_value(min_length, Proplist) of
+ undefined -> ?DEFAULT_MIN_LENGTH;
+ Value -> rabbit_data_coercion:to_integer(Value)
+ end
+ end,
+ validate(Username, Password, MinLength).
+
+
+-spec validate(rabbit_types:username(), rabbit_types:password(), integer()) -> 'ok' | {'error', string(), [any()]}.
+
+%% passwordless users
+validate(_Username, undefined, MinLength) ->
+ {error, rabbit_misc:format("minimum required password length is ~B", [MinLength])};
+validate(_Username, Password, MinLength) ->
+ case size(Password) >= MinLength of
+ true -> ok;
+ false -> {error, rabbit_misc:format("minimum required password length is ~B", [MinLength])}
+ end.
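+
+%% Illustrative only: enabling this validator with a minimum length of 10 via
+%% the same proplist read in validate/2 above (min_length is coerced to an
+%% integer there).
+%%
+%%   [{rabbit,
+%%     [{credential_validator,
+%%       [{validation_backend, rabbit_credential_validator_min_password_length},
+%%        {min_length,         10}]}]}].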
diff --git a/deps/rabbit/src/rabbit_credential_validator_password_regexp.erl b/deps/rabbit/src/rabbit_credential_validator_password_regexp.erl
new file mode 100644
index 0000000000..dc64cf1d31
--- /dev/null
+++ b/deps/rabbit/src/rabbit_credential_validator_password_regexp.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+
+%% A `rabbit_credential_validator` implementation that matches
+%% passwords against a pre-configured regular expression.
+-module(rabbit_credential_validator_password_regexp).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_credential_validator).
+
+%%
+%% API
+%%
+
+-export([validate/2]).
+%% for tests
+-export([validate/3]).
+
+-spec validate(rabbit_types:username(), rabbit_types:password()) -> 'ok' | {'error', string()}.
+
+validate(Username, Password) ->
+ {ok, Proplist} = application:get_env(rabbit, credential_validator),
+ Regexp = case proplists:get_value(regexp, Proplist) of
+ undefined -> {error, "rabbit.credential_validator.regexp config key is undefined"};
+ Value -> rabbit_data_coercion:to_list(Value)
+ end,
+ validate(Username, Password, Regexp).
+
+
+-spec validate(rabbit_types:username(), rabbit_types:password(), string()) -> 'ok' | {'error', string(), [any()]}.
+
+validate(_Username, Password, Pattern) ->
+ case re:run(rabbit_data_coercion:to_list(Password), Pattern) of
+ {match, _} -> ok;
+ nomatch -> {error, "provided password does not match the validator regular expression"}
+ end.
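+
+%% Illustrative only (the pattern is a made-up example): pairing this
+%% validator with a regular expression via the regexp key read in validate/2
+%% above.
+%%
+%%   [{rabbit,
+%%     [{credential_validator,
+%%       [{validation_backend, rabbit_credential_validator_password_regexp},
+%%        {regexp,             "^[a-zA-Z0-9]{8,}$"}]}]}].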
diff --git a/deps/rabbit/src/rabbit_dead_letter.erl b/deps/rabbit/src/rabbit_dead_letter.erl
new file mode 100644
index 0000000000..755de5cf53
--- /dev/null
+++ b/deps/rabbit/src/rabbit_dead_letter.erl
@@ -0,0 +1,253 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_dead_letter).
+
+-export([publish/5]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+%%----------------------------------------------------------------------------
+
+-type reason() :: 'expired' | 'rejected' | 'maxlen' | 'delivery_limit'.
+
+%%----------------------------------------------------------------------------
+
+-spec publish(rabbit_types:message(), reason(), rabbit_types:exchange(),
+ 'undefined' | binary(), rabbit_amqqueue:name()) -> 'ok'.
+publish(Msg, Reason, X, RK, QName) ->
+ DLMsg = make_msg(Msg, Reason, X#exchange.name, RK, QName),
+ Delivery = rabbit_basic:delivery(false, false, DLMsg, undefined),
+ {Queues, Cycles} = detect_cycles(Reason, DLMsg,
+ rabbit_exchange:route(X, Delivery)),
+ lists:foreach(fun log_cycle_once/1, Cycles),
+ _ = rabbit_queue_type:deliver(rabbit_amqqueue:lookup(Queues),
+ Delivery, stateless),
+ ok.
+
+make_msg(Msg = #basic_message{content = Content,
+ exchange_name = Exchange,
+ routing_keys = RoutingKeys},
+ Reason, DLX, RK, #resource{name = QName}) ->
+ {DeathRoutingKeys, HeadersFun1} =
+ case RK of
+ undefined -> {RoutingKeys, fun (H) -> H end};
+ _ -> {[RK], fun (H) -> lists:keydelete(<<"CC">>, 1, H) end}
+ end,
+ ReasonBin = list_to_binary(atom_to_list(Reason)),
+ TimeSec = os:system_time(seconds),
+ PerMsgTTL = per_msg_ttl_header(Content#content.properties),
+ HeadersFun2 =
+ fun (Headers) ->
+ %% The first routing key is the one specified in the
+ %% basic.publish; all others are CC or BCC keys.
+ RKs = [hd(RoutingKeys) | rabbit_basic:header_routes(Headers)],
+ RKs1 = [{longstr, Key} || Key <- RKs],
+ Info = [{<<"reason">>, longstr, ReasonBin},
+ {<<"queue">>, longstr, QName},
+ {<<"time">>, timestamp, TimeSec},
+ {<<"exchange">>, longstr, Exchange#resource.name},
+ {<<"routing-keys">>, array, RKs1}] ++ PerMsgTTL,
+ HeadersFun1(update_x_death_header(Info, Headers))
+ end,
+ Content1 = #content{properties = Props} =
+ rabbit_basic:map_headers(HeadersFun2, Content),
+ Content2 = Content1#content{properties =
+ Props#'P_basic'{expiration = undefined}},
+ Msg#basic_message{exchange_name = DLX,
+ id = rabbit_guid:gen(),
+ routing_keys = DeathRoutingKeys,
+ content = Content2}.
+
+
+x_death_event_key(Info, Key) ->
+ case lists:keysearch(Key, 1, Info) of
+ false -> undefined;
+ {value, {Key, _KeyType, Val}} -> Val
+ end.
+
+maybe_append_to_event_group(Table, _Key, _SeenKeys, []) ->
+ [Table];
+maybe_append_to_event_group(Table, {_Queue, _Reason} = Key, SeenKeys, Acc) ->
+ case sets:is_element(Key, SeenKeys) of
+ true -> Acc;
+ false -> [Table | Acc]
+ end.
+
+group_by_queue_and_reason([]) ->
+ [];
+group_by_queue_and_reason([Table]) ->
+ [Table];
+group_by_queue_and_reason(Tables) ->
+ {_, Grouped} =
+ lists:foldl(
+ fun ({table, Info}, {SeenKeys, Acc}) ->
+ Q = x_death_event_key(Info, <<"queue">>),
+ R = x_death_event_key(Info, <<"reason">>),
+ Matcher = queue_and_reason_matcher(Q, R),
+ {Matches, _} = lists:partition(Matcher, Tables),
+ {Augmented, N} = case Matches of
+ [X] -> {X, 1};
+ [X|_] = Xs -> {X, length(Xs)}
+ end,
+ Key = {Q, R},
+ Acc1 = maybe_append_to_event_group(
+ ensure_xdeath_event_count(Augmented, N),
+ Key, SeenKeys, Acc),
+ {sets:add_element(Key, SeenKeys), Acc1}
+ end, {sets:new(), []}, Tables),
+ Grouped.
+
+update_x_death_header(Info, undefined) ->
+ update_x_death_header(Info, []);
+update_x_death_header(Info, Headers) ->
+ X = x_death_event_key(Info, <<"exchange">>),
+ Q = x_death_event_key(Info, <<"queue">>),
+ R = x_death_event_key(Info, <<"reason">>),
+ case rabbit_basic:header(<<"x-death">>, Headers) of
+ undefined ->
+ %% First x-death event gets its own top-level headers.
+ %% See rabbitmq/rabbitmq-server#1332.
+ Headers2 = rabbit_misc:set_table_value(Headers, <<"x-first-death-reason">>,
+ longstr, R),
+ Headers3 = rabbit_misc:set_table_value(Headers2, <<"x-first-death-queue">>,
+ longstr, Q),
+ Headers4 = rabbit_misc:set_table_value(Headers3, <<"x-first-death-exchange">>,
+ longstr, X),
+ rabbit_basic:prepend_table_header(
+ <<"x-death">>,
+ [{<<"count">>, long, 1} | Info], Headers4);
+ {<<"x-death">>, array, Tables} ->
+ %% group existing x-death headers in case we have some from
+ %% before rabbitmq-server#78
+ GroupedTables = group_by_queue_and_reason(Tables),
+ {Matches, Others} = lists:partition(
+ queue_and_reason_matcher(Q, R),
+ GroupedTables),
+ Info1 = case Matches of
+ [] ->
+ [{<<"count">>, long, 1} | Info];
+ [{table, M}] ->
+ increment_xdeath_event_count(M)
+ end,
+ rabbit_misc:set_table_value(
+ Headers, <<"x-death">>, array,
+ [{table, rabbit_misc:sort_field_table(Info1)} | Others]);
+ {<<"x-death">>, InvalidType, Header} ->
+ rabbit_log:warning("Message has invalid x-death header (type: ~p)."
+ " Resetting header ~p~n",
+ [InvalidType, Header]),
+ %% if x-death is something other than an array (list)
+ %% then we reset it: this happens when some clients consume
+                            %% a message and re-publish it, converting header values
+ %% to strings, intentionally or not.
+ %% See rabbitmq/rabbitmq-server#767 for details.
+ rabbit_misc:set_table_value(
+ Headers, <<"x-death">>, array,
+ [{table, [{<<"count">>, long, 1} | Info]}])
+ end.
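+
+%% A rough sketch (all values invented) of the x-death entry produced above
+%% for a message dead-lettered once for a given {queue, reason} pair; further
+%% deaths for the same pair only increment the count field:
+%%
+%%   {<<"x-death">>, array,
+%%    [{table, [{<<"count">>,        long,      1},
+%%              {<<"reason">>,       longstr,   <<"expired">>},
+%%              {<<"queue">>,        longstr,   <<"orders">>},
+%%              {<<"time">>,         timestamp, 1600000000},
+%%              {<<"exchange">>,     longstr,   <<"amq.direct">>},
+%%              {<<"routing-keys">>, array,     [{longstr, <<"orders">>}]}]}]}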
+
+ensure_xdeath_event_count({table, Info}, InitialVal) when InitialVal >= 1 ->
+ {table, ensure_xdeath_event_count(Info, InitialVal)};
+ensure_xdeath_event_count(Info, InitialVal) when InitialVal >= 1 ->
+ case x_death_event_key(Info, <<"count">>) of
+ undefined ->
+ [{<<"count">>, long, InitialVal} | Info];
+ _ ->
+ Info
+ end.
+
+increment_xdeath_event_count(Info) ->
+ case x_death_event_key(Info, <<"count">>) of
+ undefined ->
+ [{<<"count">>, long, 1} | Info];
+ N ->
+ lists:keyreplace(
+ <<"count">>, 1, Info,
+ {<<"count">>, long, N + 1})
+ end.
+
+queue_and_reason_matcher(Q, R) ->
+ F = fun(Info) ->
+ x_death_event_key(Info, <<"queue">>) =:= Q
+ andalso x_death_event_key(Info, <<"reason">>) =:= R
+ end,
+ fun({table, Info}) ->
+ F(Info);
+ (Info) when is_list(Info) ->
+ F(Info)
+ end.
+
+per_msg_ttl_header(#'P_basic'{expiration = undefined}) ->
+ [];
+per_msg_ttl_header(#'P_basic'{expiration = Expiration}) ->
+ [{<<"original-expiration">>, longstr, Expiration}];
+per_msg_ttl_header(_) ->
+ [].
+
+detect_cycles(rejected, _Msg, Queues) ->
+ {Queues, []};
+
+detect_cycles(_Reason, #basic_message{content = Content}, Queues) ->
+ #content{properties = #'P_basic'{headers = Headers}} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ NoCycles = {Queues, []},
+ case Headers of
+ undefined ->
+ NoCycles;
+ _ ->
+ case rabbit_misc:table_lookup(Headers, <<"x-death">>) of
+ {array, Deaths} ->
+ {Cycling, NotCycling} =
+ lists:partition(fun (#resource{name = Queue}) ->
+ is_cycle(Queue, Deaths)
+ end, Queues),
+ OldQueues = [rabbit_misc:table_lookup(D, <<"queue">>) ||
+ {table, D} <- Deaths],
+ OldQueues1 = [QName || {longstr, QName} <- OldQueues],
+ {NotCycling, [[QName | OldQueues1] ||
+ #resource{name = QName} <- Cycling]};
+ _ ->
+ NoCycles
+ end
+ end.
+
+is_cycle(Queue, Deaths) ->
+ {Cycle, Rest} =
+ lists:splitwith(
+ fun ({table, D}) ->
+ {longstr, Queue} =/= rabbit_misc:table_lookup(D, <<"queue">>);
+ (_) ->
+ true
+ end, Deaths),
+ %% Is there a cycle, and if so, is it "fully automatic", i.e. with
+ %% no reject in it?
+ case Rest of
+ [] -> false;
+ [H|_] -> lists:all(
+ fun ({table, D}) ->
+ {longstr, <<"rejected">>} =/=
+ rabbit_misc:table_lookup(D, <<"reason">>);
+ (_) ->
+ %% There was something we didn't expect, therefore
+ %% a client must have put it there, therefore the
+ %% cycle was not "fully automatic".
+ false
+ end, Cycle ++ [H])
+ end.
+
+log_cycle_once(Queues) ->
+ Key = {queue_cycle, Queues},
+ case get(Key) of
+ true -> ok;
+ undefined -> rabbit_log:warning(
+ "Message dropped. Dead-letter queues cycle detected" ++
+ ": ~p~nThis cycle will NOT be reported again.~n",
+ [Queues]),
+ put(Key, true)
+ end.
diff --git a/deps/rabbit/src/rabbit_definitions.erl b/deps/rabbit/src/rabbit_definitions.erl
new file mode 100644
index 0000000000..0d0212dbae
--- /dev/null
+++ b/deps/rabbit/src/rabbit_definitions.erl
@@ -0,0 +1,767 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_definitions).
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([boot/0]).
+%% automatic import on boot
+-export([maybe_load_definitions/0, maybe_load_definitions/2, maybe_load_definitions_from/2,
+ has_configured_definitions_to_load/0]).
+%% import
+-export([import_raw/1, import_raw/2, import_parsed/1, import_parsed/2,
+ apply_defs/2, apply_defs/3, apply_defs/4, apply_defs/5]).
+
+-export([all_definitions/0]).
+-export([
+ list_users/0, list_vhosts/0, list_permissions/0, list_topic_permissions/0,
+ list_runtime_parameters/0, list_global_runtime_parameters/0, list_policies/0,
+ list_exchanges/0, list_queues/0, list_bindings/0,
+ is_internal_parameter/1
+]).
+-export([decode/1, decode/2, args/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+%%
+%% API
+%%
+
+-type definition_category() :: 'users' |
+ 'vhosts' |
+ 'permissions' |
+ 'topic_permissions' |
+ 'parameters' |
+ 'global_parameters' |
+ 'policies' |
+ 'queues' |
+ 'bindings' |
+ 'exchanges'.
+
+-type definition_object() :: #{binary() => any()}.
+-type definition_list() :: [definition_object()].
+
+-type definitions() :: #{
+ definition_category() => definition_list()
+}.
+
+-export_type([definition_object/0, definition_list/0, definition_category/0, definitions/0]).
+
+-define(IMPORT_WORK_POOL, definition_import_pool).
+
+boot() ->
+ PoolSize = application:get_env(rabbit, definition_import_work_pool_size, rabbit_runtime:guess_number_of_cpu_cores()),
+ rabbit_sup:start_supervisor_child(definition_import_pool_sup, worker_pool_sup, [PoolSize, ?IMPORT_WORK_POOL]).
+
+maybe_load_definitions() ->
+ %% Note that management.load_definitions is handled in the plugin for backwards compatibility.
+ %% This executes the "core" version of load_definitions.
+ maybe_load_definitions(rabbit, load_definitions).
+
+-spec import_raw(Body :: binary() | iolist()) -> ok | {error, term()}.
+import_raw(Body) ->
+ rabbit_log:info("Asked to import definitions. Acting user: ~s", [?INTERNAL_USER]),
+ case decode([], Body) of
+ {error, E} -> {error, E};
+ {ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER)
+ end.
+
+-spec import_raw(Body :: binary() | iolist(), VHost :: vhost:name()) -> ok | {error, term()}.
+import_raw(Body, VHost) ->
+ rabbit_log:info("Asked to import definitions. Acting user: ~s", [?INTERNAL_USER]),
+ case decode([], Body) of
+ {error, E} -> {error, E};
+ {ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER, fun() -> ok end, VHost)
+ end.
+
+-spec import_parsed(Defs :: #{any() => any()} | list()) -> ok | {error, term()}.
+import_parsed(Body0) when is_list(Body0) ->
+ import_parsed(maps:from_list(Body0));
+import_parsed(Body0) when is_map(Body0) ->
+ rabbit_log:info("Asked to import definitions. Acting user: ~s", [?INTERNAL_USER]),
+ Body = atomise_map_keys(Body0),
+ apply_defs(Body, ?INTERNAL_USER).
+
+-spec import_parsed(Defs :: #{any() => any() | list()}, VHost :: vhost:name()) -> ok | {error, term()}.
+import_parsed(Body0, VHost) when is_list(Body0) ->
+ import_parsed(maps:from_list(Body0), VHost);
+import_parsed(Body0, VHost) ->
+ rabbit_log:info("Asked to import definitions. Acting user: ~s", [?INTERNAL_USER]),
+ Body = atomise_map_keys(Body0),
+ apply_defs(Body, ?INTERNAL_USER, fun() -> ok end, VHost).
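+
+%% Illustrative only (names invented): importing a single pre-parsed queue
+%% definition into a specific virtual host using the categories and fields
+%% this module understands.
+%%
+%%   rabbit_definitions:import_parsed(
+%%     #{<<"queues">> => [#{<<"name">>        => <<"orders">>,
+%%                          <<"durable">>     => true,
+%%                          <<"auto_delete">> => false,
+%%                          <<"arguments">>   => #{}}]},
+%%     <<"/">>).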
+
+-spec all_definitions() -> map().
+all_definitions() ->
+ Xs = list_exchanges(),
+ Qs = list_queues(),
+ Bs = list_bindings(),
+
+ Users = list_users(),
+ VHosts = list_vhosts(),
+ Params = list_runtime_parameters(),
+ GParams = list_global_runtime_parameters(),
+ Pols = list_policies(),
+
+ Perms = list_permissions(),
+ TPerms = list_topic_permissions(),
+
+ {ok, Vsn} = application:get_key(rabbit, vsn),
+ #{
+ rabbit_version => rabbit_data_coercion:to_binary(Vsn),
+ rabbitmq_version => rabbit_data_coercion:to_binary(Vsn),
+ users => Users,
+ vhosts => VHosts,
+ permissions => Perms,
+ topic_permissions => TPerms,
+ parameters => Params,
+ global_parameters => GParams,
+ policies => Pols,
+ queues => Qs,
+ bindings => Bs,
+ exchanges => Xs
+ }.
+
+%%
+%% Implementation
+%%
+
+-spec has_configured_definitions_to_load() -> boolean().
+has_configured_definitions_to_load() ->
+ case application:get_env(rabbit, load_definitions) of
+ undefined -> false;
+ {ok, none} -> false;
+ {ok, _Path} -> true
+ end.
+
+maybe_load_definitions(App, Key) ->
+ case application:get_env(App, Key) of
+ undefined ->
+ rabbit_log:debug("No definition file configured to import via load_definitions"),
+ ok;
+ {ok, none} ->
+ rabbit_log:debug("No definition file configured to import via load_definitions"),
+ ok;
+ {ok, FileOrDir} ->
+ rabbit_log:debug("Will import definitions file from load_definitions"),
+ IsDir = filelib:is_dir(FileOrDir),
+ maybe_load_definitions_from(IsDir, FileOrDir)
+ end.
+
+maybe_load_definitions_from(true, Dir) ->
+ rabbit_log:info("Applying definitions from directory ~s", [Dir]),
+ load_definitions_from_files(file:list_dir(Dir), Dir);
+maybe_load_definitions_from(false, File) ->
+ load_definitions_from_file(File).
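+
+%% Illustrative only (paths are hypothetical): the load_definitions key
+%% consulted above may point at a single file or at a directory, in which
+%% case every file it contains is imported in sorted filename order.
+%%
+%%   [{rabbit, [
+%%     %% a single definitions file ...
+%%     {load_definitions, "/etc/rabbitmq/definitions.json"}
+%%     %% ... or a directory of definition files
+%%     %% {load_definitions, "/etc/rabbitmq/definitions.d"}
+%%   ]}].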
+
+load_definitions_from_files({ok, Filenames0}, Dir) ->
+ Filenames1 = lists:sort(Filenames0),
+ Filenames2 = [filename:join(Dir, F) || F <- Filenames1],
+ load_definitions_from_filenames(Filenames2);
+load_definitions_from_files({error, E}, Dir) ->
+ rabbit_log:error("Could not read definitions from directory ~s, Error: ~p", [Dir, E]),
+ {error, {could_not_read_defs, E}}.
+
+load_definitions_from_filenames([]) ->
+ ok;
+load_definitions_from_filenames([File|Rest]) ->
+ case load_definitions_from_file(File) of
+ ok -> load_definitions_from_filenames(Rest);
+ {error, E} -> {error, {failed_to_import_definitions, File, E}}
+ end.
+
+load_definitions_from_file(File) ->
+ case file:read_file(File) of
+ {ok, Body} ->
+ rabbit_log:info("Applying definitions from file at '~s'", [File]),
+ import_raw(Body);
+ {error, E} ->
+ rabbit_log:error("Could not read definitions from file at '~s', error: ~p", [File, E]),
+ {error, {could_not_read_defs, {File, E}}}
+ end.
+
+decode(Keys, Body) ->
+ case decode(Body) of
+ {ok, J0} ->
+ J = maps:fold(fun(K, V, Acc) ->
+ Acc#{rabbit_data_coercion:to_atom(K, utf8) => V}
+ end, J0, J0),
+ Results = [get_or_missing(K, J) || K <- Keys],
+ case [E || E = {key_missing, _} <- Results] of
+ [] -> {ok, Results, J};
+ Errors -> {error, Errors}
+ end;
+ Else -> Else
+ end.
+
+decode(<<"">>) ->
+ {ok, #{}};
+decode(Body) ->
+ try
+ Decoded = rabbit_json:decode(Body),
+ Normalised = atomise_map_keys(Decoded),
+ {ok, Normalised}
+ catch error:_ -> {error, not_json}
+ end.
+
+atomise_map_keys(Decoded) ->
+ maps:fold(fun(K, V, Acc) ->
+ Acc#{rabbit_data_coercion:to_atom(K, utf8) => V}
+ end, Decoded, Decoded).
+
+-spec apply_defs(Map :: #{atom() => any()}, ActingUser :: rabbit_types:username()) -> 'ok' | {error, term()}.
+
+apply_defs(Map, ActingUser) ->
+ apply_defs(Map, ActingUser, fun () -> ok end).
+
+-spec apply_defs(Map :: #{atom() => any()}, ActingUser :: rabbit_types:username(),
+ SuccessFun :: fun(() -> 'ok')) -> 'ok' | {error, term()};
+ (Map :: #{atom() => any()}, ActingUser :: rabbit_types:username(),
+ VHost :: vhost:name()) -> 'ok' | {error, term()}.
+
+apply_defs(Map, ActingUser, VHost) when is_binary(VHost) ->
+ apply_defs(Map, ActingUser, fun () -> ok end, VHost);
+
+apply_defs(Map, ActingUser, SuccessFun) when is_function(SuccessFun) ->
+ Version = maps:get(rabbitmq_version, Map, maps:get(rabbit_version, Map, undefined)),
+ try
+ concurrent_for_all(users, ActingUser, Map,
+ fun(User, _Username) ->
+ rabbit_auth_backend_internal:put_user(User, Version, ActingUser)
+ end),
+ concurrent_for_all(vhosts, ActingUser, Map, fun add_vhost/2),
+ validate_limits(Map),
+ concurrent_for_all(permissions, ActingUser, Map, fun add_permission/2),
+ concurrent_for_all(topic_permissions, ActingUser, Map, fun add_topic_permission/2),
+ sequential_for_all(parameters, ActingUser, Map, fun add_parameter/2),
+ sequential_for_all(global_parameters, ActingUser, Map, fun add_global_parameter/2),
+        %% importing policies concurrently can be unsafe as queues could receive
+        %% notifications of applicable policy changes out of order
+ sequential_for_all(policies, ActingUser, Map, fun add_policy/2),
+ concurrent_for_all(queues, ActingUser, Map, fun add_queue/2),
+ concurrent_for_all(exchanges, ActingUser, Map, fun add_exchange/2),
+ concurrent_for_all(bindings, ActingUser, Map, fun add_binding/2),
+ SuccessFun(),
+ ok
+ catch {error, E} -> {error, E};
+ exit:E -> {error, E}
+ end.
+
+-spec apply_defs(Map :: #{atom() => any()},
+ ActingUser :: rabbit_types:username(),
+ SuccessFun :: fun(() -> 'ok'),
+ VHost :: vhost:name()) -> 'ok' | {error, term()}.
+
+apply_defs(Map, ActingUser, SuccessFun, VHost) when is_binary(VHost) ->
+ rabbit_log:info("Asked to import definitions for a virtual host. Virtual host: ~p, acting user: ~p",
+ [VHost, ActingUser]),
+ try
+ validate_limits(Map, VHost),
+ sequential_for_all(parameters, ActingUser, Map, VHost, fun add_parameter/3),
+        %% importing policies concurrently can be unsafe as queues could receive
+        %% notifications of applicable policy changes out of order
+ sequential_for_all(policies, ActingUser, Map, VHost, fun add_policy/3),
+ concurrent_for_all(queues, ActingUser, Map, VHost, fun add_queue/3),
+ concurrent_for_all(exchanges, ActingUser, Map, VHost, fun add_exchange/3),
+ concurrent_for_all(bindings, ActingUser, Map, VHost, fun add_binding/3),
+ SuccessFun()
+ catch {error, E} -> {error, format(E)};
+ exit:E -> {error, format(E)}
+ end.
+
+-spec apply_defs(Map :: #{atom() => any()},
+ ActingUser :: rabbit_types:username(),
+ SuccessFun :: fun(() -> 'ok'),
+ ErrorFun :: fun((any()) -> 'ok'),
+ VHost :: vhost:name()) -> 'ok' | {error, term()}.
+
+apply_defs(Map, ActingUser, SuccessFun, ErrorFun, VHost) ->
+ rabbit_log:info("Asked to import definitions for a virtual host. Virtual host: ~p, acting user: ~p",
+ [VHost, ActingUser]),
+ try
+ validate_limits(Map, VHost),
+ sequential_for_all(parameters, ActingUser, Map, VHost, fun add_parameter/3),
+        %% importing policies concurrently can be unsafe as queues could receive
+        %% notifications of applicable policy changes out of order
+ sequential_for_all(policies, ActingUser, Map, VHost, fun add_policy/3),
+ concurrent_for_all(queues, ActingUser, Map, VHost, fun add_queue/3),
+ concurrent_for_all(exchanges, ActingUser, Map, VHost, fun add_exchange/3),
+ concurrent_for_all(bindings, ActingUser, Map, VHost, fun add_binding/3),
+ SuccessFun()
+ catch {error, E} -> ErrorFun(format(E));
+ exit:E -> ErrorFun(format(E))
+ end.
+
+sequential_for_all(Category, ActingUser, Definitions, Fun) ->
+ case maps:get(rabbit_data_coercion:to_atom(Category), Definitions, undefined) of
+ undefined -> ok;
+ List ->
+ case length(List) of
+ 0 -> ok;
+ N -> rabbit_log:info("Importing sequentially ~p ~s...", [N, human_readable_category_name(Category)])
+ end,
+ [begin
+ %% keys are expected to be atoms
+ Fun(atomize_keys(M), ActingUser)
+ end || M <- List, is_map(M)]
+ end.
+
+sequential_for_all(Name, ActingUser, Definitions, VHost, Fun) ->
+ case maps:get(rabbit_data_coercion:to_atom(Name), Definitions, undefined) of
+ undefined -> ok;
+ List -> [Fun(VHost, atomize_keys(M), ActingUser) || M <- List, is_map(M)]
+ end.
+
+concurrent_for_all(Category, ActingUser, Definitions, Fun) ->
+ case maps:get(rabbit_data_coercion:to_atom(Category), Definitions, undefined) of
+ undefined -> ok;
+ List ->
+ case length(List) of
+ 0 -> ok;
+ N -> rabbit_log:info("Importing concurrently ~p ~s...", [N, human_readable_category_name(Category)])
+ end,
+ WorkPoolFun = fun(M) ->
+ Fun(atomize_keys(M), ActingUser)
+ end,
+ do_concurrent_for_all(List, WorkPoolFun)
+ end.
+
+concurrent_for_all(Name, ActingUser, Definitions, VHost, Fun) ->
+ case maps:get(rabbit_data_coercion:to_atom(Name), Definitions, undefined) of
+ undefined -> ok;
+ List ->
+ WorkPoolFun = fun(M) ->
+ Fun(VHost, atomize_keys(M), ActingUser)
+ end,
+ do_concurrent_for_all(List, WorkPoolFun)
+ end.
+
+do_concurrent_for_all(List, WorkPoolFun) ->
+ {ok, Gatherer} = gatherer:start_link(),
+ [begin
+ %% keys are expected to be atoms
+ ok = gatherer:fork(Gatherer),
+ worker_pool:submit_async(
+ ?IMPORT_WORK_POOL,
+ fun() ->
+ try
+ WorkPoolFun(M)
+ catch {error, E} -> gatherer:in(Gatherer, {error, E});
+ _:E -> gatherer:in(Gatherer, {error, E})
+ end,
+ gatherer:finish(Gatherer)
+ end)
+ end || M <- List, is_map(M)],
+ case gatherer:out(Gatherer) of
+ empty ->
+ ok = gatherer:stop(Gatherer);
+ {value, {error, E}} ->
+ ok = gatherer:stop(Gatherer),
+ throw({error, E})
+ end.
+
+-spec atomize_keys(#{any() => any()}) -> #{atom() => any()}.
+
+atomize_keys(M) ->
+ maps:fold(fun(K, V, Acc) ->
+ maps:put(rabbit_data_coercion:to_atom(K), V, Acc)
+ end, #{}, M).
+
+-spec human_readable_category_name(definition_category()) -> string().
+
+human_readable_category_name(topic_permissions) -> "topic permissions";
+human_readable_category_name(parameters) -> "runtime parameters";
+human_readable_category_name(global_parameters) -> "global runtime parameters";
+human_readable_category_name(Other) -> rabbit_data_coercion:to_list(Other).
+
+
+format(#amqp_error{name = Name, explanation = Explanation}) ->
+ rabbit_data_coercion:to_binary(rabbit_misc:format("~s: ~s", [Name, Explanation]));
+format({no_such_vhost, undefined}) ->
+ rabbit_data_coercion:to_binary(
+ "Virtual host does not exist and is not specified in definitions file.");
+format({no_such_vhost, VHost}) ->
+ rabbit_data_coercion:to_binary(
+ rabbit_misc:format("Please create virtual host \"~s\" prior to importing definitions.",
+ [VHost]));
+format({vhost_limit_exceeded, ErrMsg}) ->
+ rabbit_data_coercion:to_binary(ErrMsg);
+format(E) ->
+ rabbit_data_coercion:to_binary(rabbit_misc:format("~p", [E])).
+
+add_parameter(Param, Username) ->
+ VHost = maps:get(vhost, Param, undefined),
+ add_parameter(VHost, Param, Username).
+
+add_parameter(VHost, Param, Username) ->
+ Comp = maps:get(component, Param, undefined),
+ Key = maps:get(name, Param, undefined),
+ Term = maps:get(value, Param, undefined),
+ Result = case is_map(Term) of
+ true ->
+ %% coerce maps to proplists for backwards compatibility.
+ %% See rabbitmq-management#528.
+ TermProplist = rabbit_data_coercion:to_proplist(Term),
+ rabbit_runtime_parameters:set(VHost, Comp, Key, TermProplist, Username);
+ _ ->
+ rabbit_runtime_parameters:set(VHost, Comp, Key, Term, Username)
+ end,
+ case Result of
+ ok -> ok;
+ {error_string, E} ->
+ S = rabbit_misc:format(" (~s/~s/~s)", [VHost, Comp, Key]),
+ exit(rabbit_data_coercion:to_binary(rabbit_misc:escape_html_tags(E ++ S)))
+ end.
+
+add_global_parameter(Param, Username) ->
+ Key = maps:get(name, Param, undefined),
+ Term = maps:get(value, Param, undefined),
+ case is_map(Term) of
+ true ->
+ %% coerce maps to proplists for backwards compatibility.
+ %% See rabbitmq-management#528.
+ TermProplist = rabbit_data_coercion:to_proplist(Term),
+ rabbit_runtime_parameters:set_global(Key, TermProplist, Username);
+ _ ->
+ rabbit_runtime_parameters:set_global(Key, Term, Username)
+ end.
+
+add_policy(Param, Username) ->
+ VHost = maps:get(vhost, Param, undefined),
+ add_policy(VHost, Param, Username).
+
+add_policy(VHost, Param, Username) ->
+ Key = maps:get(name, Param, undefined),
+ case rabbit_policy:set(
+ VHost, Key, maps:get(pattern, Param, undefined),
+ case maps:get(definition, Param, undefined) of
+ undefined -> undefined;
+ Def -> rabbit_data_coercion:to_proplist(Def)
+ end,
+ maps:get(priority, Param, undefined),
+ maps:get('apply-to', Param, <<"all">>),
+ Username) of
+ ok -> ok;
+ {error_string, E} -> S = rabbit_misc:format(" (~s/~s)", [VHost, Key]),
+ exit(rabbit_data_coercion:to_binary(rabbit_misc:escape_html_tags(E ++ S)))
+ end.
+
+-spec add_vhost(map(), rabbit_types:username()) -> ok.
+
+add_vhost(VHost, ActingUser) ->
+ VHostName = maps:get(name, VHost, undefined),
+ VHostTrace = maps:get(tracing, VHost, undefined),
+ VHostDefinition = maps:get(definition, VHost, undefined),
+ VHostTags = maps:get(tags, VHost, undefined),
+ rabbit_vhost:put_vhost(VHostName, VHostDefinition, VHostTags, VHostTrace, ActingUser).
+
+add_permission(Permission, ActingUser) ->
+ rabbit_auth_backend_internal:set_permissions(maps:get(user, Permission, undefined),
+ maps:get(vhost, Permission, undefined),
+ maps:get(configure, Permission, undefined),
+ maps:get(write, Permission, undefined),
+ maps:get(read, Permission, undefined),
+ ActingUser).
+
+add_topic_permission(TopicPermission, ActingUser) ->
+ rabbit_auth_backend_internal:set_topic_permissions(
+ maps:get(user, TopicPermission, undefined),
+ maps:get(vhost, TopicPermission, undefined),
+ maps:get(exchange, TopicPermission, undefined),
+ maps:get(write, TopicPermission, undefined),
+ maps:get(read, TopicPermission, undefined),
+ ActingUser).
+
+add_queue(Queue, ActingUser) ->
+ add_queue_int(Queue, r(queue, Queue), ActingUser).
+
+add_queue(VHost, Queue, ActingUser) ->
+ add_queue_int(Queue, rv(VHost, queue, Queue), ActingUser).
+
+add_queue_int(_Queue, R = #resource{kind = queue,
+ name = <<"amq.", _/binary>>}, ActingUser) ->
+ Name = R#resource.name,
+ rabbit_log:warning("Skipping import of a queue whose name begins with 'amq.', "
+ "name: ~s, acting user: ~s", [Name, ActingUser]);
+add_queue_int(Queue, Name, ActingUser) ->
+ rabbit_amqqueue:declare(Name,
+ maps:get(durable, Queue, undefined),
+ maps:get(auto_delete, Queue, undefined),
+ args(maps:get(arguments, Queue, undefined)),
+ none,
+ ActingUser).
+
+add_exchange(Exchange, ActingUser) ->
+ add_exchange_int(Exchange, r(exchange, Exchange), ActingUser).
+
+add_exchange(VHost, Exchange, ActingUser) ->
+ add_exchange_int(Exchange, rv(VHost, exchange, Exchange), ActingUser).
+
+add_exchange_int(_Exchange, #resource{kind = exchange, name = <<"">>}, ActingUser) ->
+ rabbit_log:warning("Not importing the default exchange, acting user: ~s", [ActingUser]);
+add_exchange_int(_Exchange, R = #resource{kind = exchange,
+ name = <<"amq.", _/binary>>}, ActingUser) ->
+ Name = R#resource.name,
+ rabbit_log:warning("Skipping import of an exchange whose name begins with 'amq.', "
+ "name: ~s, acting user: ~s", [Name, ActingUser]);
+add_exchange_int(Exchange, Name, ActingUser) ->
+ Internal = case maps:get(internal, Exchange, undefined) of
+ undefined -> false; %% =< 2.2.0
+ I -> I
+ end,
+ rabbit_exchange:declare(Name,
+ rabbit_exchange:check_type(maps:get(type, Exchange, undefined)),
+ maps:get(durable, Exchange, undefined),
+ maps:get(auto_delete, Exchange, undefined),
+ Internal,
+ args(maps:get(arguments, Exchange, undefined)),
+ ActingUser).
+
+add_binding(Binding, ActingUser) ->
+ DestType = dest_type(Binding),
+ add_binding_int(Binding, r(exchange, source, Binding),
+ r(DestType, destination, Binding), ActingUser).
+
+add_binding(VHost, Binding, ActingUser) ->
+ DestType = dest_type(Binding),
+ add_binding_int(Binding, rv(VHost, exchange, source, Binding),
+ rv(VHost, DestType, destination, Binding), ActingUser).
+
+add_binding_int(Binding, Source, Destination, ActingUser) ->
+ rabbit_binding:add(
+ #binding{source = Source,
+ destination = Destination,
+ key = maps:get(routing_key, Binding, undefined),
+ args = args(maps:get(arguments, Binding, undefined))},
+ ActingUser).
+
+dest_type(Binding) ->
+ rabbit_data_coercion:to_atom(maps:get(destination_type, Binding, undefined)).
+
+r(Type, Props) -> r(Type, name, Props).
+
+r(Type, Name, Props) ->
+ rabbit_misc:r(maps:get(vhost, Props, undefined), Type, maps:get(Name, Props, undefined)).
+
+rv(VHost, Type, Props) -> rv(VHost, Type, name, Props).
+
+rv(VHost, Type, Name, Props) ->
+ rabbit_misc:r(VHost, Type, maps:get(Name, Props, undefined)).
+
+%%--------------------------------------------------------------------
+
+validate_limits(All) ->
+ case maps:get(queues, All, undefined) of
+ undefined -> ok;
+ Queues0 ->
+ {ok, VHostMap} = filter_out_existing_queues(Queues0),
+ maps:fold(fun validate_vhost_limit/3, ok, VHostMap)
+ end.
+
+validate_limits(All, VHost) ->
+ case maps:get(queues, All, undefined) of
+ undefined -> ok;
+ Queues0 ->
+ Queues1 = filter_out_existing_queues(VHost, Queues0),
+ AddCount = length(Queues1),
+ validate_vhost_limit(VHost, AddCount, ok)
+ end.
+
+filter_out_existing_queues(Queues) ->
+ build_filtered_map(Queues, maps:new()).
+
+filter_out_existing_queues(VHost, Queues) ->
+ Pred = fun(Queue) ->
+ Rec = rv(VHost, queue, <<"name">>, Queue),
+ case rabbit_amqqueue:lookup(Rec) of
+ {ok, _} -> false;
+ {error, not_found} -> true
+ end
+ end,
+ lists:filter(Pred, Queues).
+
+build_queue_data(Queue) ->
+ VHost = maps:get(<<"vhost">>, Queue, undefined),
+ Rec = rv(VHost, queue, <<"name">>, Queue),
+ {Rec, VHost}.
+
+build_filtered_map([], AccMap) ->
+ {ok, AccMap};
+build_filtered_map([Queue|Rest], AccMap0) ->
+ {Rec, VHost} = build_queue_data(Queue),
+ case rabbit_amqqueue:lookup(Rec) of
+ {error, not_found} ->
+ AccMap1 = maps:update_with(VHost, fun(V) -> V + 1 end, 1, AccMap0),
+ build_filtered_map(Rest, AccMap1);
+ {ok, _} ->
+ build_filtered_map(Rest, AccMap0)
+ end.
+
+validate_vhost_limit(VHost, AddCount, ok) ->
+ WouldExceed = rabbit_vhost_limit:would_exceed_queue_limit(AddCount, VHost),
+ validate_vhost_queue_limit(VHost, AddCount, WouldExceed).
+
+validate_vhost_queue_limit(_VHost, 0, _) ->
+ % Note: not adding any new queues so the upload
+ % must be update-only
+ ok;
+validate_vhost_queue_limit(_VHost, _AddCount, false) ->
+ % Note: would not exceed queue limit
+ ok;
+validate_vhost_queue_limit(VHost, AddCount, {true, Limit, QueueCount}) ->
+ ErrFmt = "Adding ~B queue(s) to virtual host \"~s\" would exceed the limit of ~B queue(s).~n~nThis virtual host currently has ~B queue(s) defined.~n~nImport aborted!",
+ ErrInfo = [AddCount, VHost, Limit, QueueCount],
+ ErrMsg = rabbit_misc:format(ErrFmt, ErrInfo),
+ exit({vhost_limit_exceeded, ErrMsg}).
+
+get_or_missing(K, L) ->
+ case maps:get(K, L, undefined) of
+ undefined -> {key_missing, K};
+ V -> V
+ end.
+
+args([]) -> args(#{});
+args(L) -> rabbit_misc:to_amqp_table(L).
+
+%%
+%% Export
+%%
+
+list_exchanges() ->
+    %% exclude internal exchanges; they are not meant to be declared or used by
+    %% applications
+ [exchange_definition(X) || X <- lists:filter(fun(#exchange{internal = true}) -> false;
+ (#exchange{name = #resource{name = <<>>}}) -> false;
+ (X) -> not rabbit_exchange:is_amq_prefixed(X)
+ end,
+ rabbit_exchange:list())].
+
+exchange_definition(#exchange{name = #resource{virtual_host = VHost, name = Name},
+ type = Type,
+ durable = Durable, auto_delete = AD, arguments = Args}) ->
+ #{<<"vhost">> => VHost,
+ <<"name">> => Name,
+ <<"type">> => Type,
+ <<"durable">> => Durable,
+ <<"auto_delete">> => AD,
+ <<"arguments">> => rabbit_misc:amqp_table(Args)}.
+
+list_queues() ->
+    %% exclude exclusive queues; they cannot be restored
+ [queue_definition(Q) || Q <- lists:filter(fun(Q0) ->
+ amqqueue:get_exclusive_owner(Q0) =:= none
+ end,
+ rabbit_amqqueue:list())].
+
+queue_definition(Q) ->
+ #resource{virtual_host = VHost, name = Name} = amqqueue:get_name(Q),
+ Type = case amqqueue:get_type(Q) of
+ rabbit_classic_queue -> classic;
+ rabbit_quorum_queue -> quorum;
+ rabbit_stream_queue -> stream;
+ T -> T
+ end,
+ #{
+ <<"vhost">> => VHost,
+ <<"name">> => Name,
+ <<"type">> => Type,
+ <<"durable">> => amqqueue:is_durable(Q),
+ <<"auto_delete">> => amqqueue:is_auto_delete(Q),
+ <<"arguments">> => rabbit_misc:amqp_table(amqqueue:get_arguments(Q))
+ }.
+
+list_bindings() ->
+ [binding_definition(B) || B <- rabbit_binding:list_explicit()].
+
+binding_definition(#binding{source = S,
+ key = RoutingKey,
+ destination = D,
+ args = Args}) ->
+ #{
+ <<"source">> => S#resource.name,
+ <<"vhost">> => S#resource.virtual_host,
+ <<"destination">> => D#resource.name,
+ <<"destination_type">> => D#resource.kind,
+ <<"routing_key">> => RoutingKey,
+ <<"arguments">> => rabbit_misc:amqp_table(Args)
+ }.
+
+list_vhosts() ->
+ [vhost_definition(V) || V <- rabbit_vhost:all()].
+
+vhost_definition(VHost) ->
+ #{
+ <<"name">> => vhost:get_name(VHost),
+ <<"limits">> => vhost:get_limits(VHost),
+ <<"metadata">> => vhost:get_metadata(VHost)
+ }.
+
+list_users() ->
+ [user_definition(U) || U <- rabbit_auth_backend_internal:all_users()].
+
+user_definition(User) ->
+ #{<<"name">> => internal_user:get_username(User),
+ <<"password_hash">> => base64:encode(internal_user:get_password_hash(User)),
+ <<"hashing_algorithm">> => rabbit_auth_backend_internal:hashing_module_for_user(User),
+ <<"tags">> => tags_as_binaries(internal_user:get_tags(User)),
+ <<"limits">> => internal_user:get_limits(User)
+ }.
+
+list_runtime_parameters() ->
+ [runtime_parameter_definition(P) || P <- rabbit_runtime_parameters:list(), is_list(P)].
+
+runtime_parameter_definition(Param) ->
+ #{
+ <<"vhost">> => pget(vhost, Param),
+ <<"component">> => pget(component, Param),
+ <<"name">> => pget(name, Param),
+ <<"value">> => maps:from_list(pget(value, Param))
+ }.
+
+list_global_runtime_parameters() ->
+ [global_runtime_parameter_definition(P) || P <- rabbit_runtime_parameters:list_global(), not is_internal_parameter(P)].
+
+global_runtime_parameter_definition(P0) ->
+ P = [{rabbit_data_coercion:to_binary(K), V} || {K, V} <- P0],
+ maps:from_list(P).
+
+-define(INTERNAL_GLOBAL_PARAM_PREFIX, "internal").
+
+is_internal_parameter(Param) ->
+ Name = rabbit_data_coercion:to_list(pget(name, Param)),
+    %% if a global parameter's name starts with "internal", consider it internal
+    %% and exclude it from definition export
+ string:left(Name, length(?INTERNAL_GLOBAL_PARAM_PREFIX)) =:= ?INTERNAL_GLOBAL_PARAM_PREFIX.
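+
+%% For example, a (hypothetical) global parameter named internal_cluster_id
+%% would be skipped by the export above, while cluster_name would be included.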
+
+list_policies() ->
+ [policy_definition(P) || P <- rabbit_policy:list()].
+
+policy_definition(Policy) ->
+ #{
+ <<"vhost">> => pget(vhost, Policy),
+ <<"name">> => pget(name, Policy),
+ <<"pattern">> => pget(pattern, Policy),
+ <<"apply-to">> => pget('apply-to', Policy),
+ <<"priority">> => pget(priority, Policy),
+ <<"definition">> => maps:from_list(pget(definition, Policy))
+ }.
+
+list_permissions() ->
+ [permission_definition(P) || P <- rabbit_auth_backend_internal:list_permissions()].
+
+permission_definition(P0) ->
+ P = [{rabbit_data_coercion:to_binary(K), V} || {K, V} <- P0],
+ maps:from_list(P).
+
+list_topic_permissions() ->
+ [topic_permission_definition(P) || P <- rabbit_auth_backend_internal:list_topic_permissions()].
+
+topic_permission_definition(P0) ->
+ P = [{rabbit_data_coercion:to_binary(K), V} || {K, V} <- P0],
+ maps:from_list(P).
+
+tags_as_binaries(Tags) ->
+ list_to_binary(string:join([atom_to_list(T) || T <- Tags], ",")).
diff --git a/deps/rabbit/src/rabbit_diagnostics.erl b/deps/rabbit/src/rabbit_diagnostics.erl
new file mode 100644
index 0000000000..999596cdc9
--- /dev/null
+++ b/deps/rabbit/src/rabbit_diagnostics.erl
@@ -0,0 +1,119 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_diagnostics).
+
+-define(PROCESS_INFO,
+ [registered_name, current_stacktrace, initial_call, message_queue_len,
+ links, monitors, monitored_by, heap_size]).
+
+-export([maybe_stuck/0, maybe_stuck/1, top_memory_use/0, top_memory_use/1,
+ top_binary_refs/0, top_binary_refs/1]).
+
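+%% These helpers are intended for interactive troubleshooting; for example,
+%% from a remote shell attached to the node (or via rabbitmqctl eval) one
+%% might run (arguments are illustrative, the defaults are 5000 ms and 30
+%% processes respectively):
+%%
+%%   rabbit_diagnostics:maybe_stuck(10000).
+%%   rabbit_diagnostics:top_memory_use(10).
+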
+maybe_stuck() -> maybe_stuck(5000).
+
+maybe_stuck(Timeout) ->
+ Pids = processes(),
+ io:format("~s There are ~p processes.~n", [get_time(), length(Pids)]),
+ maybe_stuck(Pids, Timeout).
+
+maybe_stuck(Pids, Timeout) when Timeout =< 0 ->
+ io:format("~s Found ~p suspicious processes.~n", [get_time(), length(Pids)]),
+ [io:format("~s ~p~n", [get_time(), info(Pid)]) || Pid <- Pids],
+ ok;
+maybe_stuck(Pids, Timeout) ->
+ Pids2 = [P || P <- Pids, looks_stuck(P)],
+ io:format("~s Investigated ~p processes this round, ~pms to go.~n",
+ [get_time(), length(Pids2), Timeout]),
+ timer:sleep(500),
+ maybe_stuck(Pids2, Timeout - 500).
+
+looks_stuck(Pid) ->
+ case info(Pid, status, gone) of
+ {status, waiting} ->
+ %% It's tempting to just check for message_queue_len > 0
+ %% here rather than mess around with stack traces and
+ %% heuristics. But really, sometimes freshly stuck
+ %% processes can have 0 messages...
+ case info(Pid, current_stacktrace, gone) of
+ {current_stacktrace, [H|_]} ->
+ maybe_stuck_stacktrace(H);
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end.
+
+maybe_stuck_stacktrace({gen_server2, process_next_msg, _}) -> false;
+maybe_stuck_stacktrace({gen_event, fetch_msg, _}) -> false;
+maybe_stuck_stacktrace({prim_inet, accept0, _}) -> false;
+maybe_stuck_stacktrace({prim_inet, recv0, _}) -> false;
+maybe_stuck_stacktrace({rabbit_heartbeat, heartbeater, _}) -> false;
+maybe_stuck_stacktrace({rabbit_net, recv, _}) -> false;
+maybe_stuck_stacktrace({group, _, _}) -> false;
+maybe_stuck_stacktrace({shell, _, _}) -> false;
+maybe_stuck_stacktrace({io, _, _}) -> false;
+maybe_stuck_stacktrace({M, F, A, _}) ->
+ maybe_stuck_stacktrace({M, F, A});
+maybe_stuck_stacktrace({_M, F, _A}) ->
+ case string:str(atom_to_list(F), "loop") of
+ 0 -> true;
+ _ -> false
+ end.
+
+top_memory_use() -> top_memory_use(30).
+
+top_memory_use(Count) ->
+ Pids = processes(),
+ io:format("~s Memory use: top ~p of ~p processes.~n", [get_time(), Count, length(Pids)]),
+ Procs = [{info(Pid, memory, 0), info(Pid)} || Pid <- Pids],
+ Sorted = lists:sublist(lists:reverse(lists:sort(Procs)), Count),
+ io:format("~s ~p~n", [get_time(), Sorted]).
+
+top_binary_refs() -> top_binary_refs(30).
+
+top_binary_refs(Count) ->
+ Pids = processes(),
+ io:format("~s Binary refs: top ~p of ~p processes.~n", [get_time(), Count, length(Pids)]),
+ Procs = [{{binary_refs, binary_refs(Pid)}, info(Pid)} || Pid <- Pids],
+ Sorted = lists:sublist(lists:reverse(lists:sort(Procs)), Count),
+ io:format("~s ~p~n", [get_time(), Sorted]).
+
+binary_refs(Pid) ->
+ case info(Pid, binary, []) of
+ {binary, Refs} ->
+ lists:sum([Sz || {_Ptr, Sz} <- lists:usort([{Ptr, Sz} ||
+ {Ptr, Sz, _Cnt} <- Refs])]);
+ _ -> 0
+ end.
+
+info(Pid) ->
+ [{pid, Pid} | info(Pid, ?PROCESS_INFO, [])].
+
+info(Pid, Infos, Default) ->
+ try
+ process_info(Pid, Infos)
+ catch
+ _:_ -> case is_atom(Infos) of
+ true -> {Infos, Default};
+ false -> Default
+ end
+ end.
+
+get_time() ->
+ {{Y,M,D}, {H,Min,Sec}} = calendar:local_time(),
+ [ integer_to_list(Y), "-",
+ prefix_zero(integer_to_list(M)), "-",
+ prefix_zero(integer_to_list(D)), " ",
+ prefix_zero(integer_to_list(H)), ":",
+ prefix_zero(integer_to_list(Min)), ":",
+ prefix_zero(integer_to_list(Sec))
+ ].
+
+prefix_zero([C]) -> [$0, C];
+prefix_zero([_,_] = Full) -> Full.
diff --git a/deps/rabbit/src/rabbit_direct.erl b/deps/rabbit/src/rabbit_direct.erl
new file mode 100644
index 0000000000..3fc2d75908
--- /dev/null
+++ b/deps/rabbit/src/rabbit_direct.erl
@@ -0,0 +1,235 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_direct).
+
+-export([boot/0, force_event_refresh/1, list/0, connect/5,
+ start_channel/10, disconnect/2]).
+
+-deprecated([{force_event_refresh, 1, eventually}]).
+
+%% Internal
+-export([list_local/0]).
+
+%% For testing only
+-export([extract_extra_auth_props/4]).
+
+-include("rabbit.hrl").
+-include("rabbit_misc.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec boot() -> 'ok'.
+
+boot() -> rabbit_sup:start_supervisor_child(
+ rabbit_direct_client_sup, rabbit_client_sup,
+ [{local, rabbit_direct_client_sup},
+ {rabbit_channel_sup, start_link, []}]).
+
+-spec force_event_refresh(reference()) -> 'ok'.
+
+force_event_refresh(Ref) ->
+ [Pid ! {force_event_refresh, Ref} || Pid <- list()],
+ ok.
+
+-spec list_local() -> [pid()].
+
+list_local() ->
+ pg_local:get_members(rabbit_direct).
+
+-spec list() -> [pid()].
+
+list() ->
+ Nodes = rabbit_nodes:all_running(),
+ rabbit_misc:append_rpc_all_nodes(Nodes, rabbit_direct, list_local, [], ?RPC_TIMEOUT).
+
+%%----------------------------------------------------------------------------
+
+auth_fun({none, _}, _VHost, _ExtraAuthProps) ->
+ fun () -> {ok, rabbit_auth_backend_dummy:user()} end;
+
+auth_fun({Username, none}, _VHost, _ExtraAuthProps) ->
+ fun () -> rabbit_access_control:check_user_login(Username, []) end;
+
+auth_fun({Username, Password}, VHost, ExtraAuthProps) ->
+ fun () ->
+ rabbit_access_control:check_user_login(
+ Username,
+ [{password, Password}, {vhost, VHost}] ++ ExtraAuthProps)
+ end.
+
+-spec connect
+ (({'none', 'none'} | {rabbit_types:username(), 'none'} |
+ {rabbit_types:username(), rabbit_types:password()}),
+ rabbit_types:vhost(), rabbit_types:protocol(), pid(),
+ rabbit_event:event_props()) ->
+ rabbit_types:ok_or_error2(
+ {rabbit_types:user(), rabbit_framing:amqp_table()},
+ 'broker_not_found_on_node' |
+ {'auth_failure', string()} | 'access_refused').
+
+connect(Creds, VHost, Protocol, Pid, Infos) ->
+ ExtraAuthProps = extract_extra_auth_props(Creds, VHost, Pid, Infos),
+ AuthFun = auth_fun(Creds, VHost, ExtraAuthProps),
+ case rabbit:is_running() of
+ true ->
+ case whereis(rabbit_direct_client_sup) of
+ undefined ->
+ {error, broker_is_booting};
+ _ ->
+ case is_over_vhost_connection_limit(VHost, Creds, Pid) of
+ true ->
+ {error, not_allowed};
+ false ->
+ case is_vhost_alive(VHost, Creds, Pid) of
+ false ->
+ {error, {internal_error, vhost_is_down}};
+ true ->
+ case AuthFun() of
+ {ok, User = #user{username = Username}} ->
+ notify_auth_result(Username,
+ user_authentication_success, []),
+ connect1(User, VHost, Protocol, Pid, Infos);
+ {refused, Username, Msg, Args} ->
+ notify_auth_result(Username,
+ user_authentication_failure,
+ [{error, rabbit_misc:format(Msg, Args)}]),
+ {error, {auth_failure, "Refused"}}
+ end %% AuthFun()
+ end %% is_vhost_alive
+ end %% is_over_vhost_connection_limit
+ end;
+ false -> {error, broker_not_found_on_node}
+ end.
+
+extract_extra_auth_props(Creds, VHost, Pid, Infos) ->
+ case extract_protocol(Infos) of
+ undefined ->
+ [];
+ Protocol ->
+ maybe_call_connection_info_module(Protocol, Creds, VHost, Pid, Infos)
+ end.
+
+extract_protocol(Infos) ->
+ case proplists:get_value(protocol, Infos, undefined) of
+ {Protocol, _Version} ->
+ Protocol;
+ _ ->
+ undefined
+ end.
+
+maybe_call_connection_info_module(Protocol, Creds, VHost, Pid, Infos) ->
+ Module = rabbit_data_coercion:to_atom(string:to_lower(
+ "rabbit_" ++
+ lists:flatten(string:replace(rabbit_data_coercion:to_list(Protocol), " ", "_", all)) ++
+ "_connection_info")
+ ),
+ Args = [Creds, VHost, Pid, Infos],
+ code_server_cache:maybe_call_mfa(Module, additional_authn_params, Args, []).
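+
+%% For example, a protocol reported as {'STOMP', "1.2"} in Infos would map to
+%% a module named rabbit_stomp_connection_info; whether such a module exists
+%% is up to the protocol plugin, and the empty default passed to
+%% code_server_cache:maybe_call_mfa/4 above covers the case where it does not.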
+
+is_vhost_alive(VHost, {Username, _Password}, Pid) ->
+ PrintedUsername = case Username of
+ none -> "";
+ _ -> Username
+ end,
+ case rabbit_vhost_sup_sup:is_vhost_alive(VHost) of
+ true -> true;
+ false ->
+ rabbit_log_connection:error(
+ "Error on Direct connection ~p~n"
+ "access to vhost '~s' refused for user '~s': "
+ "vhost '~s' is down",
+ [Pid, VHost, PrintedUsername, VHost]),
+ false
+ end.
+
+is_over_vhost_connection_limit(VHost, {Username, _Password}, Pid) ->
+ PrintedUsername = case Username of
+ none -> "";
+ _ -> Username
+ end,
+ try rabbit_vhost_limit:is_over_connection_limit(VHost) of
+ false -> false;
+ {true, Limit} ->
+ rabbit_log_connection:error(
+ "Error on Direct connection ~p~n"
+ "access to vhost '~s' refused for user '~s': "
+ "vhost connection limit (~p) is reached",
+ [Pid, VHost, PrintedUsername, Limit]),
+ true
+ catch
+ throw:{error, {no_such_vhost, VHost}} ->
+ rabbit_log_connection:error(
+ "Error on Direct connection ~p~n"
+ "vhost ~s not found", [Pid, VHost]),
+ true
+ end.
+
+notify_auth_result(Username, AuthResult, ExtraProps) ->
+ EventProps = [{connection_type, direct},
+ {name, case Username of none -> ''; _ -> Username end}] ++
+ ExtraProps,
+ rabbit_event:notify(AuthResult, [P || {_, V} = P <- EventProps, V =/= '']).
+
+connect1(User = #user{username = Username}, VHost, Protocol, Pid, Infos) ->
+ case rabbit_auth_backend_internal:is_over_connection_limit(Username) of
+ false ->
+ % Note: peer_host can be either a tuple or
+ % a binary if reverse_dns_lookups is enabled
+ PeerHost = proplists:get_value(peer_host, Infos),
+ AuthzContext = proplists:get_value(variable_map, Infos, #{}),
+ try rabbit_access_control:check_vhost_access(User, VHost,
+ {ip, PeerHost}, AuthzContext) of
+ ok -> ok = pg_local:join(rabbit_direct, Pid),
+ rabbit_core_metrics:connection_created(Pid, Infos),
+ rabbit_event:notify(connection_created, Infos),
+ {ok, {User, rabbit_reader:server_properties(Protocol)}}
+ catch
+ exit:#amqp_error{name = Reason = not_allowed} ->
+ {error, Reason}
+ end;
+ {true, Limit} ->
+ rabbit_log_connection:error(
+ "Error on Direct connection ~p~n"
+ "access refused for user '~s': "
+ "user connection limit (~p) is reached",
+ [Pid, Username, Limit]),
+ {error, not_allowed}
+ end.
+
+-spec start_channel
+ (rabbit_channel:channel_number(), pid(), pid(), string(),
+ rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
+ rabbit_framing:amqp_table(), pid(), any()) ->
+ {'ok', pid()}.
+
+start_channel(Number, ClientChannelPid, ConnPid, ConnName, Protocol,
+ User = #user{username = Username}, VHost, Capabilities,
+ Collector, AmqpParams) ->
+ case rabbit_auth_backend_internal:is_over_channel_limit(Username) of
+ false ->
+ {ok, _, {ChannelPid, _}} =
+ supervisor2:start_child(
+ rabbit_direct_client_sup,
+ [{direct, Number, ClientChannelPid, ConnPid, ConnName, Protocol,
+ User, VHost, Capabilities, Collector, AmqpParams}]),
+ {ok, ChannelPid};
+ {true, Limit} ->
+ rabbit_log_connection:error(
+ "Error on direct connection ~p~n"
+ "number of channels opened for user '~s' has reached the "
+ "maximum allowed limit of (~w)",
+ [ConnPid, Username, Limit]),
+ {error, not_allowed}
+ end.
+
+-spec disconnect(pid(), rabbit_event:event_props()) -> 'ok'.
+
+disconnect(Pid, Infos) ->
+ pg_local:leave(rabbit_direct, Pid),
+ rabbit_core_metrics:connection_closed(Pid),
+ rabbit_event:notify(connection_closed, Infos).
diff --git a/deps/rabbit/src/rabbit_disk_monitor.erl b/deps/rabbit/src/rabbit_disk_monitor.erl
new file mode 100644
index 0000000000..8277794098
--- /dev/null
+++ b/deps/rabbit/src/rabbit_disk_monitor.erl
@@ -0,0 +1,317 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_disk_monitor).
+
+%% Disk monitoring server. Monitors free disk space
+%% periodically and sets alarms when it is below a certain
+%% watermark (configurable either as an absolute value or
+%% relative to the memory limit).
+%%
+%% Disk monitoring is done by shelling out to /usr/bin/df
+%% instead of related built-in OTP functions because currently
+%% this is the most reliable way of determining free disk space
+%% for the partition our internal database is on.
+%%
+%% Update interval is dynamically calculated assuming disk
+%% space is being filled at FAST_RATE.
+
+-behaviour(gen_server).
+
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([get_disk_free_limit/0, set_disk_free_limit/1,
+ get_min_check_interval/0, set_min_check_interval/1,
+ get_max_check_interval/0, set_max_check_interval/1,
+ get_disk_free/0, set_enabled/1]).
+
+-define(SERVER, ?MODULE).
+-define(DEFAULT_MIN_DISK_CHECK_INTERVAL, 100).
+-define(DEFAULT_MAX_DISK_CHECK_INTERVAL, 10000).
+-define(DEFAULT_DISK_FREE_LIMIT, 50000000).
+%% 250MB/s i.e. 250kB/ms
+-define(FAST_RATE, (250 * 1000)).
+
+-record(state, {
+ %% monitor partition on which this directory resides
+ dir,
+ %% configured limit in bytes
+ limit,
+ %% last known free disk space amount in bytes
+ actual,
+ %% minimum check interval
+ min_interval,
+ %% maximum check interval
+ max_interval,
+ %% timer that drives periodic checks
+ timer,
+ %% is free disk space alarm currently in effect?
+ alarmed,
+ %% is monitoring enabled? false on unsupported
+ %% platforms
+ enabled,
+ %% number of retries to enable monitoring if it fails
+ %% on start-up
+ retries,
+ %% Interval between retries
+ interval
+}).
+
+%%----------------------------------------------------------------------------
+
+-type disk_free_limit() :: (integer() | string() | {'mem_relative', float() | integer()}).
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+-spec get_disk_free_limit() -> integer().
+
+get_disk_free_limit() ->
+ gen_server:call(?MODULE, get_disk_free_limit, infinity).
+
+-spec set_disk_free_limit(disk_free_limit()) -> 'ok'.
+
+set_disk_free_limit(Limit) ->
+ gen_server:call(?MODULE, {set_disk_free_limit, Limit}, infinity).
+
+-spec get_min_check_interval() -> integer().
+
+get_min_check_interval() ->
+ gen_server:call(?MODULE, get_min_check_interval, infinity).
+
+-spec set_min_check_interval(integer()) -> 'ok'.
+
+set_min_check_interval(Interval) ->
+ gen_server:call(?MODULE, {set_min_check_interval, Interval}, infinity).
+
+-spec get_max_check_interval() -> integer().
+
+get_max_check_interval() ->
+ gen_server:call(?MODULE, get_max_check_interval, infinity).
+
+-spec set_max_check_interval(integer()) -> 'ok'.
+
+set_max_check_interval(Interval) ->
+ gen_server:call(?MODULE, {set_max_check_interval, Interval}, infinity).
+
+-spec get_disk_free() -> (integer() | 'unknown').
+-spec set_enabled(string()) -> 'ok'.
+
+get_disk_free() ->
+ gen_server:call(?MODULE, get_disk_free, infinity).
+
+set_enabled(Enabled) ->
+ gen_server:call(?MODULE, {set_enabled, Enabled}, infinity).
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+-spec start_link(disk_free_limit()) -> rabbit_types:ok_pid_or_error().
+
+start_link(Args) ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [Args], []).
+
+init([Limit]) ->
+ Dir = dir(),
+ {ok, Retries} = application:get_env(rabbit, disk_monitor_failure_retries),
+ {ok, Interval} = application:get_env(rabbit, disk_monitor_failure_retry_interval),
+ State = #state{dir = Dir,
+ min_interval = ?DEFAULT_MIN_DISK_CHECK_INTERVAL,
+ max_interval = ?DEFAULT_MAX_DISK_CHECK_INTERVAL,
+ alarmed = false,
+ enabled = true,
+ limit = Limit,
+ retries = Retries,
+ interval = Interval},
+ {ok, enable(State)}.
+
+handle_call(get_disk_free_limit, _From, State = #state{limit = Limit}) ->
+ {reply, Limit, State};
+
+handle_call({set_disk_free_limit, _}, _From, #state{enabled = false} = State) ->
+    rabbit_log:info("Cannot set disk free limit: "
+                    "disk free space monitoring is disabled", []),
+ {reply, ok, State};
+
+handle_call({set_disk_free_limit, Limit}, _From, State) ->
+ {reply, ok, set_disk_limits(State, Limit)};
+
+handle_call(get_min_check_interval, _From, State) ->
+ {reply, State#state.min_interval, State};
+
+handle_call(get_max_check_interval, _From, State) ->
+ {reply, State#state.max_interval, State};
+
+handle_call({set_min_check_interval, MinInterval}, _From, State) ->
+ {reply, ok, State#state{min_interval = MinInterval}};
+
+handle_call({set_max_check_interval, MaxInterval}, _From, State) ->
+ {reply, ok, State#state{max_interval = MaxInterval}};
+
+handle_call(get_disk_free, _From, State = #state { actual = Actual }) ->
+ {reply, Actual, State};
+
+handle_call({set_enabled, _Enabled = true}, _From, State) ->
+    State1 = start_timer(set_disk_limits(State, State#state.limit)),
+    rabbit_log:info("Free disk space monitor was enabled"),
+    {reply, ok, State1#state{enabled = true}};
+handle_call({set_enabled, _Enabled = false}, _From, State) ->
+ erlang:cancel_timer(State#state.timer),
+ rabbit_log:info("Free disk space monitor was manually disabled"),
+ {reply, ok, State#state{enabled = false}};
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(try_enable, #state{retries = Retries} = State) ->
+ {noreply, enable(State#state{retries = Retries - 1})};
+handle_info(update, State) ->
+ {noreply, start_timer(internal_update(State))};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Server Internals
+%%----------------------------------------------------------------------------
+
+% the partition / drive containing this directory will be monitored
+dir() -> rabbit_mnesia:dir().
+
+set_disk_limits(State, Limit0) ->
+ Limit = interpret_limit(Limit0),
+ State1 = State#state { limit = Limit },
+ rabbit_log:info("Disk free limit set to ~pMB~n",
+ [trunc(Limit / 1000000)]),
+ internal_update(State1).
+
+internal_update(State = #state { limit = Limit,
+ dir = Dir,
+ alarmed = Alarmed}) ->
+ CurrentFree = get_disk_free(Dir),
+ NewAlarmed = CurrentFree < Limit,
+ case {Alarmed, NewAlarmed} of
+ {false, true} ->
+ emit_update_info("insufficient", CurrentFree, Limit),
+ rabbit_alarm:set_alarm({{resource_limit, disk, node()}, []});
+ {true, false} ->
+ emit_update_info("sufficient", CurrentFree, Limit),
+ rabbit_alarm:clear_alarm({resource_limit, disk, node()});
+ _ ->
+ ok
+ end,
+ State #state {alarmed = NewAlarmed, actual = CurrentFree}.
+
+get_disk_free(Dir) ->
+ get_disk_free(Dir, os:type()).
+
+get_disk_free(Dir, {unix, Sun})
+ when Sun =:= sunos; Sun =:= sunos4; Sun =:= solaris ->
+ Df = os:find_executable("df"),
+ parse_free_unix(rabbit_misc:os_cmd(Df ++ " -k " ++ Dir));
+get_disk_free(Dir, {unix, _}) ->
+ Df = os:find_executable("df"),
+ parse_free_unix(rabbit_misc:os_cmd(Df ++ " -kP " ++ Dir));
+get_disk_free(Dir, {win32, _}) ->
+ %% On Windows, the Win32 API enforces a limit of 260 characters
+ %% (MAX_PATH). If we call `dir` with a path longer than that, it
+ %% fails with "File not found". Starting with Windows 10 version
+ %% 1607, this limit was removed, but the administrator has to
+ %% configure that.
+ %%
+ %% NTFS supports paths up to 32767 characters. Therefore, paths
+ %% longer than 260 characters exist but they are "inaccessible" to
+ %% `dir`.
+ %%
+ %% A workaround is to tell the Win32 API to not parse a path and
+ %% just pass it raw to the underlying filesystem. To do this, the
+ %% path must be prepended with "\\?\". That's what we do here.
+ %%
+ %% However, the underlying filesystem may not support forward
+ %% slashes transparently, as the Win32 API does. Therefore, we
+ %% convert all forward slashes to backslashes.
+ %%
+ %% See the following page to learn more about this:
+ %% https://ss64.com/nt/syntax-filenames.html
+ RawDir = "\\\\?\\" ++ string:replace(Dir, "/", "\\", all),
+ parse_free_win32(rabbit_misc:os_cmd("dir /-C /W \"" ++ RawDir ++ "\"")).
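As a rough illustration of the path rewrite described in the comment above (the directory is hypothetical): forward slashes are turned into backslashes and the raw-path prefix is prepended before the path is quoted for dir.

    %% Conceptual trace; string:replace/4 returns a deep list, flattened
    %% here only so the result is easy to read.
    Dir = "C:/Users/rabbitmq/db/rabbit@host-mnesia",
    lists:flatten(["\\\\?\\" | string:replace(Dir, "/", "\\", all)]).
    %% => "\\\\?\\C:\\Users\\rabbitmq\\db\\rabbit@host-mnesia"
    %%    i.e. the literal path \\?\C:\Users\rabbitmq\db\rabbit@host-mnesia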
+
+parse_free_unix(Str) ->
+ case string:tokens(Str, "\n") of
+ [_, S | _] -> case string:tokens(S, " \t") of
+ [_, _, _, Free | _] -> list_to_integer(Free) * 1024;
+ _ -> exit({unparseable, Str})
+ end;
+ _ -> exit({unparseable, Str})
+ end.
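To make the parsing concrete, here is a conceptual trace with sample df -kP output (hypothetical numbers; the function is not exported, so this is illustrative rather than something to paste into a shell):

    %% The second line's fourth column is the available space in
    %% 1024-byte blocks.
    Out = "Filesystem 1024-blocks     Used Available Capacity Mounted on\n"
          "/dev/sda1    103081248 55183540  42638900      57% /\n",
    parse_free_unix(Out).
    %% => 42638900 * 1024 = 43662233600 bytes free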
+
+parse_free_win32(CommandResult) ->
+ LastLine = lists:last(string:tokens(CommandResult, "\r\n")),
+ {match, [Free]} = re:run(lists:reverse(LastLine), "(\\d+)",
+ [{capture, all_but_first, list}]),
+ list_to_integer(lists:reverse(Free)).
+
+interpret_limit({mem_relative, Relative})
+ when is_number(Relative) ->
+ round(Relative * vm_memory_monitor:get_total_memory());
+interpret_limit(Absolute) ->
+ case rabbit_resource_monitor_misc:parse_information_unit(Absolute) of
+ {ok, ParsedAbsolute} -> ParsedAbsolute;
+ {error, parse_error} ->
+ rabbit_log:error("Unable to parse disk_free_limit value ~p",
+ [Absolute]),
+ ?DEFAULT_DISK_FREE_LIMIT
+ end.
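A worked example of the relative form, assuming (hypothetically) that vm_memory_monitor reports 8 GiB of total memory:

    %% interpret_limit({mem_relative, 1.5}) with 8 GiB of RAM:
    round(1.5 * 8589934592).
    %% => 12884901888 bytes of free disk space required before alarming.
    %% Absolute values go through parse_information_unit/1 instead, and an
    %% unparseable value falls back to ?DEFAULT_DISK_FREE_LIMIT.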
+
+emit_update_info(StateStr, CurrentFree, Limit) ->
+ rabbit_log:info(
+ "Free disk space is ~s. Free bytes: ~p. Limit: ~p~n",
+ [StateStr, CurrentFree, Limit]).
+
+start_timer(State) ->
+ State#state{timer = erlang:send_after(interval(State), self(), update)}.
+
+interval(#state{alarmed = true,
+ max_interval = MaxInterval}) ->
+ MaxInterval;
+interval(#state{limit = Limit,
+ actual = Actual,
+ min_interval = MinInterval,
+ max_interval = MaxInterval}) ->
+ IdealInterval = 2 * (Actual - Limit) / ?FAST_RATE,
+ trunc(erlang:max(MinInterval, erlang:min(MaxInterval, IdealInterval))).
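A worked example of the adaptive interval while no alarm is in effect, with hypothetical figures: the idea is that the headroom above the limit cannot be consumed in fewer than two check periods at ?FAST_RATE (250 kB per millisecond).

    %% 1 GB of headroom: Actual = 1050000000, Limit = 50000000.
    IdealInterval = 2 * (1050000000 - 50000000) / 250000.
    %% => 8000.0 ms, already within [min_interval, max_interval], so the
    %% next check runs in 8 seconds. With 10 GB of headroom the ideal
    %% 80000 ms would be clamped to max_interval (10000 ms by default).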
+
+enable(#state{retries = 0} = State) ->
+ State;
+enable(#state{dir = Dir, interval = Interval, limit = Limit, retries = Retries}
+ = State) ->
+ case {catch get_disk_free(Dir),
+ vm_memory_monitor:get_total_memory()} of
+ {N1, N2} when is_integer(N1), is_integer(N2) ->
+ rabbit_log:info("Enabling free disk space monitoring~n", []),
+ start_timer(set_disk_limits(State, Limit));
+ Err ->
+ rabbit_log:info("Free disk space monitor encountered an error "
+ "(e.g. failed to parse output from OS tools): ~p, retries left: ~b~n",
+ [Err, Retries]),
+ erlang:send_after(Interval, self(), try_enable),
+ State#state{enabled = false}
+ end.
diff --git a/deps/rabbit/src/rabbit_epmd_monitor.erl b/deps/rabbit/src/rabbit_epmd_monitor.erl
new file mode 100644
index 0000000000..938826dba6
--- /dev/null
+++ b/deps/rabbit/src/rabbit_epmd_monitor.erl
@@ -0,0 +1,104 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_epmd_monitor).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-record(state, {timer, mod, me, host, port}).
+
+-define(SERVER, ?MODULE).
+-define(CHECK_FREQUENCY, 60000).
+
+%%----------------------------------------------------------------------------
+%% It's possible for epmd to be killed out from underneath us. If that
+%% happens, then clustering and rabbitmqctl obviously stop
+%% working. This process checks up on epmd and restarts it /
+%% re-registers us with it if it has gone away.
+%%
+%% How could epmd be killed?
+%%
+%% 1) The most popular way for this to happen is when running as a
+%% Windows service. The user starts rabbitmqctl first, and this starts
+%% epmd under the user's account. When they log out epmd is killed.
+%%
+%% 2) Some packagings of (non-RabbitMQ?) Erlang apps might do "killall
+%% epmd" as a shutdown or uninstall step.
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+init([]) ->
+ {Me, Host} = rabbit_nodes:parts(node()),
+ Mod = net_kernel:epmd_module(),
+ {ok, Port} = handle_port_please(init, Mod:port_please(Me, Host), Me, undefined),
+ State = #state{mod = Mod, me = Me, host = Host, port = Port},
+ {ok, ensure_timer(State)}.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(check, State0) ->
+ {ok, State1} = check_epmd(State0),
+ {noreply, ensure_timer(State1#state{timer = undefined})};
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(check, State0) ->
+ {ok, State1} = check_epmd(State0),
+ {noreply, ensure_timer(State1#state{timer = undefined})};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+ensure_timer(State) ->
+ rabbit_misc:ensure_timer(State, #state.timer, ?CHECK_FREQUENCY, check).
+
+check_epmd(State = #state{mod = Mod,
+ me = Me,
+ host = Host,
+ port = Port0}) ->
+ rabbit_log:debug("Asked to [re-]register this node (~s@~s) with epmd...", [Me, Host]),
+ {ok, Port1} = handle_port_please(check, Mod:port_please(Me, Host), Me, Port0),
+ rabbit_nodes:ensure_epmd(),
+ Mod:register_node(Me, Port1),
+ rabbit_log:debug("[Re-]registered this node (~s@~s) with epmd at port ~p", [Me, Host, Port1]),
+ {ok, State#state{port = Port1}}.
+
+handle_port_please(init, noport, Me, Port) ->
+ rabbit_log:info("epmd does not know us, re-registering as ~s~n", [Me]),
+ {ok, Port};
+handle_port_please(check, noport, Me, Port) ->
+ rabbit_log:warning("epmd does not know us, re-registering ~s at port ~b~n", [Me, Port]),
+ {ok, Port};
+handle_port_please(_, closed, _Me, Port) ->
+ rabbit_log:error("epmd monitor failed to retrieve our port from epmd: closed"),
+ {ok, Port};
+handle_port_please(init, {port, NewPort, _Version}, _Me, _Port) ->
+ rabbit_log:info("epmd monitor knows us, inter-node communication (distribution) port: ~p", [NewPort]),
+ {ok, NewPort};
+handle_port_please(check, {port, NewPort, _Version}, _Me, _Port) ->
+ {ok, NewPort};
+handle_port_please(_, {error, Error}, _Me, Port) ->
+ rabbit_log:error("epmd monitor failed to retrieve our port from epmd: ~p", [Error]),
+ {ok, Port}.
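For context, a conceptual illustration of the two interesting results of Mod:port_please/2 (normally erl_epmd) handled by the clauses above; the node name and port number are hypothetical:

    erl_epmd:port_please("rabbit", "localhost").
    %% => {port, 25672, 5}  when epmd knows the node: the distribution
    %%                      port and protocol version are returned.
    %% => noport            when epmd was killed or restarted: the monitor
    %%                      re-registers the node using its last known port.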
diff --git a/deps/rabbit/src/rabbit_event_consumer.erl b/deps/rabbit/src/rabbit_event_consumer.erl
new file mode 100644
index 0000000000..489d39312e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_event_consumer.erl
@@ -0,0 +1,197 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_event_consumer).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([register/4]).
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {pid, ref, monitor, pattern}).
+
+%%----------------------------------------------------------------------------
+
+register(Pid, Ref, Duration, Pattern) ->
+ case gen_event:add_handler(rabbit_event, ?MODULE, [Pid, Ref, Duration, Pattern]) of
+ ok ->
+ {ok, Ref};
+ Error ->
+ Error
+ end.
+
+%%----------------------------------------------------------------------------
+
+init([Pid, Ref, Duration, Pattern]) ->
+ MRef = erlang:monitor(process, Pid),
+ case Duration of
+ infinity -> infinity;
+ _ -> erlang:send_after(Duration * 1000, self(), rabbit_event_consumer_timeout)
+ end,
+ {ok, #state{pid = Pid, ref = Ref, monitor = MRef, pattern = Pattern}}.
+
+handle_call(_Request, State) -> {ok, not_understood, State}.
+
+handle_event(#event{type = Type,
+ props = Props,
+ timestamp = TS,
+ reference = none}, #state{pid = Pid,
+ ref = Ref,
+ pattern = Pattern} = State) ->
+ case key(Type) of
+ ignore -> ok;
+ Key -> case re:run(Key, Pattern, [{capture, none}]) of
+ match ->
+ Data = [{'event', Key}] ++
+ fmt_proplist([{'timestamp_in_ms', TS} | Props]),
+ Pid ! {Ref, Data, confinue};
+ _ ->
+ ok
+ end
+ end,
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_info({'DOWN', MRef, _, _, _}, #state{monitor = MRef}) ->
+ remove_handler;
+handle_info(rabbit_event_consumer_timeout, #state{pid = Pid, ref = Ref}) ->
+ Pid ! {Ref, <<>>, finished},
+ remove_handler;
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, #state{monitor = MRef}) ->
+ erlang:demonitor(MRef),
+ ok.
+
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+%% pattern matching is way more efficient than the string operations,
+%% let's use all the keys we're aware of to speed up the handler.
+%% Any unknown or new one will be processed as before (see last function clause).
+key(queue_deleted) ->
+ <<"queue.deleted">>;
+key(queue_created) ->
+ <<"queue.created">>;
+key(exchange_created) ->
+ <<"exchange.created">>;
+key(exchange_deleted) ->
+ <<"exchange.deleted">>;
+key(binding_created) ->
+ <<"binding.created">>;
+key(connection_created) ->
+ <<"connection.created">>;
+key(connection_closed) ->
+ <<"connection.closed">>;
+key(channel_created) ->
+ <<"channel.created">>;
+key(channel_closed) ->
+ <<"channel.closed">>;
+key(consumer_created) ->
+ <<"consumer.created">>;
+key(consumer_deleted) ->
+ <<"consumer.deleted">>;
+key(queue_stats) ->
+ ignore;
+key(connection_stats) ->
+ ignore;
+key(policy_set) ->
+ <<"policy.set">>;
+key(policy_cleared) ->
+ <<"policy.cleared">>;
+key(parameter_set) ->
+ <<"parameter.set">>;
+key(parameter_cleared) ->
+ <<"parameter.cleared">>;
+key(vhost_created) ->
+ <<"vhost.created">>;
+key(vhost_deleted) ->
+ <<"vhost.deleted">>;
+key(vhost_limits_set) ->
+ <<"vhost.limits.set">>;
+key(vhost_limits_cleared) ->
+ <<"vhost.limits.cleared">>;
+key(user_authentication_success) ->
+ <<"user.authentication.success">>;
+key(user_authentication_failure) ->
+ <<"user.authentication.failure">>;
+key(user_created) ->
+ <<"user.created">>;
+key(user_deleted) ->
+ <<"user.deleted">>;
+key(user_password_changed) ->
+ <<"user.password.changed">>;
+key(user_password_cleared) ->
+ <<"user.password.cleared">>;
+key(user_tags_set) ->
+ <<"user.tags.set">>;
+key(permission_created) ->
+ <<"permission.created">>;
+key(permission_deleted) ->
+ <<"permission.deleted">>;
+key(topic_permission_created) ->
+ <<"topic.permission.created">>;
+key(topic_permission_deleted) ->
+ <<"topic.permission.deleted">>;
+key(alarm_set) ->
+ <<"alarm.set">>;
+key(alarm_cleared) ->
+ <<"alarm.cleared">>;
+key(shovel_worker_status) ->
+ <<"shovel.worker.status">>;
+key(shovel_worker_removed) ->
+ <<"shovel.worker.removed">>;
+key(federation_link_status) ->
+ <<"federation.link.status">>;
+key(federation_link_removed) ->
+ <<"federation.link.removed">>;
+key(S) ->
+ case string:tokens(atom_to_list(S), "_") of
+ [_, "stats"] -> ignore;
+ Tokens -> list_to_binary(string:join(Tokens, "."))
+ end.
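A couple of hypothetical inputs to the fallback clause (these event types are illustrative, not necessarily emitted by the broker):

    key(node_stats).                 %% => ignore (matches [_, "stats"])
    key(queue_mirror_sync_started).  %% => <<"queue.mirror.sync.started">>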
+
+fmt_proplist(Props) ->
+ lists:foldl(fun({K, V}, Acc) ->
+ case fmt(K, V) of
+ L when is_list(L) -> lists:append(L, Acc);
+ T -> [T | Acc]
+ end
+ end, [], Props).
+
+fmt(K, #resource{virtual_host = VHost,
+ name = Name}) -> [{K, Name},
+ {'vhost', VHost}];
+fmt(K, true) -> {K, true};
+fmt(K, false) -> {K, false};
+fmt(K, V) when is_atom(V) -> {K, atom_to_binary(V, utf8)};
+fmt(K, V) when is_integer(V) -> {K, V};
+fmt(K, V) when is_number(V) -> {K, V};
+fmt(K, V) when is_binary(V) -> {K, V};
+fmt(K, [{_, _}|_] = Vs) -> {K, fmt_proplist(Vs)};
+fmt(K, Vs) when is_list(Vs) -> {K, [fmt(V) || V <- Vs]};
+fmt(K, V) when is_pid(V) -> {K, list_to_binary(rabbit_misc:pid_to_string(V))};
+fmt(K, V) -> {K,
+ list_to_binary(
+ rabbit_misc:format("~1000000000p", [V]))}.
+
+%% Exactly the same as fmt/2, duplicated only for performance reasons
+fmt(true) -> true;
+fmt(false) -> false;
+fmt(V) when is_atom(V) -> atom_to_binary(V, utf8);
+fmt(V) when is_integer(V) -> V;
+fmt(V) when is_number(V) -> V;
+fmt(V) when is_binary(V) -> V;
+fmt([{_, _}|_] = Vs) -> fmt_proplist(Vs);
+fmt(Vs) when is_list(Vs) -> [fmt(V) || V <- Vs];
+fmt(V) when is_pid(V) -> list_to_binary(rabbit_misc:pid_to_string(V));
+fmt(V) -> list_to_binary(
+ rabbit_misc:format("~1000000000p", [V])).
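An illustrative trace of the formatting helpers with hypothetical event properties (#resource{} is the record from rabbit.hrl):

    %% Resource records are flattened into a name plus a vhost entry,
    %% non-boolean atoms become binaries, and unknown terms are printed.
    Props = [{name, #resource{virtual_host = <<"/">>, kind = queue,
                              name = <<"orders">>}},
             {durable, true},
             {user_who_performed_action, guest}],
    fmt_proplist(Props).
    %% => [{user_who_performed_action,<<"guest">>},
    %%     {durable,true},
    %%     {name,<<"orders">>},
    %%     {vhost,<<"/">>}]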
diff --git a/deps/rabbit/src/rabbit_exchange.erl b/deps/rabbit/src/rabbit_exchange.erl
new file mode 100644
index 0000000000..129b2b868b
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange.erl
@@ -0,0 +1,592 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-export([recover/1, policy_changed/2, callback/4, declare/7,
+ assert_equivalence/6, assert_args_equivalence/2, check_type/1,
+ lookup/1, lookup_many/1, lookup_or_die/1, list/0, list/1, lookup_scratch/2,
+ update_scratch/3, update_decorators/1, immutable/1,
+ info_keys/0, info/1, info/2, info_all/1, info_all/2, info_all/4,
+ route/2, delete/3, validate_binding/2, count/0]).
+-export([list_names/0, is_amq_prefixed/1]).
+%% these must be run inside a mnesia tx
+-export([maybe_auto_delete/2, serial/1, peek_serial/1, update/2]).
+
+%%----------------------------------------------------------------------------
+
+-export_type([name/0, type/0]).
+
+-type name() :: rabbit_types:r('exchange').
+-type type() :: atom().
+-type fun_name() :: atom().
+
+%%----------------------------------------------------------------------------
+
+-define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments,
+ policy, user_who_performed_action]).
+
+-spec recover(rabbit_types:vhost()) -> [name()].
+
+recover(VHost) ->
+ Xs = rabbit_misc:table_filter(
+ fun (#exchange{name = XName}) ->
+ XName#resource.virtual_host =:= VHost andalso
+ mnesia:read({rabbit_exchange, XName}) =:= []
+ end,
+ fun (X, Tx) ->
+ X1 = case Tx of
+ true -> store_ram(X);
+ false -> rabbit_exchange_decorator:set(X)
+ end,
+ callback(X1, create, map_create_tx(Tx), [X1])
+ end,
+ rabbit_durable_exchange),
+ [XName || #exchange{name = XName} <- Xs].
+
+-spec callback
+ (rabbit_types:exchange(), fun_name(),
+ fun((boolean()) -> non_neg_integer()) | atom(), [any()]) -> 'ok'.
+
+callback(X = #exchange{type = XType,
+ decorators = Decorators}, Fun, Serial0, Args) ->
+ Serial = if is_function(Serial0) -> Serial0;
+ is_atom(Serial0) -> fun (_Bool) -> Serial0 end
+ end,
+ [ok = apply(M, Fun, [Serial(M:serialise_events(X)) | Args]) ||
+ M <- rabbit_exchange_decorator:select(all, Decorators)],
+ Module = type_to_module(XType),
+ apply(Module, Fun, [Serial(Module:serialise_events()) | Args]).
+
+-spec policy_changed
+ (rabbit_types:exchange(), rabbit_types:exchange()) -> 'ok'.
+
+policy_changed(X = #exchange{type = XType,
+ decorators = Decorators},
+ X1 = #exchange{decorators = Decorators1}) ->
+ D = rabbit_exchange_decorator:select(all, Decorators),
+ D1 = rabbit_exchange_decorator:select(all, Decorators1),
+ DAll = lists:usort(D ++ D1),
+ [ok = M:policy_changed(X, X1) || M <- [type_to_module(XType) | DAll]],
+ ok.
+
+serialise_events(X = #exchange{type = Type, decorators = Decorators}) ->
+ lists:any(fun (M) -> M:serialise_events(X) end,
+ rabbit_exchange_decorator:select(all, Decorators))
+ orelse (type_to_module(Type)):serialise_events().
+
+-spec serial(rabbit_types:exchange()) ->
+ fun((boolean()) -> 'none' | pos_integer()).
+
+serial(#exchange{name = XName} = X) ->
+ Serial = case serialise_events(X) of
+ true -> next_serial(XName);
+ false -> none
+ end,
+ fun (true) -> Serial;
+ (false) -> none
+ end.
+
+-spec is_amq_prefixed(rabbit_types:exchange() | binary()) -> boolean().
+
+is_amq_prefixed(Name) when is_binary(Name) ->
+ case re:run(Name, <<"^amq\.">>) of
+ nomatch -> false;
+ {match, _} -> true
+ end;
+is_amq_prefixed(#exchange{name = #resource{name = <<>>}}) ->
+ false;
+is_amq_prefixed(#exchange{name = #resource{name = Name}}) ->
+ is_amq_prefixed(Name).
+
+-spec declare
+ (name(), type(), boolean(), boolean(), boolean(),
+ rabbit_framing:amqp_table(), rabbit_types:username())
+ -> rabbit_types:exchange().
+
+declare(XName, Type, Durable, AutoDelete, Internal, Args, Username) ->
+ X = rabbit_exchange_decorator:set(
+ rabbit_policy:set(#exchange{name = XName,
+ type = Type,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ arguments = Args,
+ options = #{user => Username}})),
+ XT = type_to_module(Type),
+ %% We want to upset things if it isn't ok
+ ok = XT:validate(X),
+ %% Avoid a channel exception if there's a race condition
+ %% with an exchange.delete operation.
+ %%
+ %% See rabbitmq/rabbitmq-federation#7.
+ case rabbit_runtime_parameters:lookup(XName#resource.virtual_host,
+ ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT,
+ XName#resource.name) of
+ not_found ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:wread({rabbit_exchange, XName}) of
+ [] ->
+ {new, store(X)};
+ [ExistingX] ->
+ {existing, ExistingX}
+ end
+ end,
+ fun ({new, Exchange}, Tx) ->
+ ok = callback(X, create, map_create_tx(Tx), [Exchange]),
+ rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)),
+ Exchange;
+ ({existing, Exchange}, _Tx) ->
+ Exchange;
+ (Err, _Tx) ->
+ Err
+ end);
+ _ ->
+            rabbit_log:warning("ignoring exchange.declare for exchange ~p, "
+                               "exchange.delete in progress~n", [XName]),
+ X
+ end.
+
+map_create_tx(true) -> transaction;
+map_create_tx(false) -> none.
+
+
+store(X = #exchange{durable = true}) ->
+ mnesia:write(rabbit_durable_exchange, X#exchange{decorators = undefined},
+ write),
+ store_ram(X);
+store(X = #exchange{durable = false}) ->
+ store_ram(X).
+
+store_ram(X) ->
+ X1 = rabbit_exchange_decorator:set(X),
+ ok = mnesia:write(rabbit_exchange, rabbit_exchange_decorator:set(X1),
+ write),
+ X1.
+
+%% Used with binaries sent over the wire; the type may not exist.
+
+-spec check_type
+ (binary()) -> atom() | rabbit_types:connection_exit().
+
+check_type(TypeBin) ->
+ case rabbit_registry:binary_to_type(rabbit_data_coercion:to_binary(TypeBin)) of
+ {error, not_found} ->
+ rabbit_misc:protocol_error(
+ command_invalid, "unknown exchange type '~s'", [TypeBin]);
+ T ->
+ case rabbit_registry:lookup_module(exchange, T) of
+ {error, not_found} -> rabbit_misc:protocol_error(
+ command_invalid,
+ "invalid exchange type '~s'", [T]);
+ {ok, _Module} -> T
+ end
+ end.
+
+-spec assert_equivalence
+ (rabbit_types:exchange(), atom(), boolean(), boolean(), boolean(),
+ rabbit_framing:amqp_table())
+ -> 'ok' | rabbit_types:connection_exit().
+
+assert_equivalence(X = #exchange{ name = XName,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ type = Type},
+ ReqType, ReqDurable, ReqAutoDelete, ReqInternal, ReqArgs) ->
+ AFE = fun rabbit_misc:assert_field_equivalence/4,
+ AFE(Type, ReqType, XName, type),
+ AFE(Durable, ReqDurable, XName, durable),
+ AFE(AutoDelete, ReqAutoDelete, XName, auto_delete),
+ AFE(Internal, ReqInternal, XName, internal),
+ (type_to_module(Type)):assert_args_equivalence(X, ReqArgs).
+
+-spec assert_args_equivalence
+ (rabbit_types:exchange(), rabbit_framing:amqp_table())
+ -> 'ok' | rabbit_types:connection_exit().
+
+assert_args_equivalence(#exchange{ name = Name, arguments = Args },
+ RequiredArgs) ->
+ %% The spec says "Arguments are compared for semantic
+ %% equivalence". The only arg we care about is
+ %% "alternate-exchange".
+ rabbit_misc:assert_args_equivalence(Args, RequiredArgs, Name,
+ [<<"alternate-exchange">>]).
+
+-spec lookup
+ (name()) -> rabbit_types:ok(rabbit_types:exchange()) |
+ rabbit_types:error('not_found').
+
+lookup(Name) ->
+ rabbit_misc:dirty_read({rabbit_exchange, Name}).
+
+
+-spec lookup_many([name()]) -> [rabbit_types:exchange()].
+
+lookup_many([]) -> [];
+lookup_many([Name]) -> ets:lookup(rabbit_exchange, Name);
+lookup_many(Names) when is_list(Names) ->
+ %% Normally we'd call mnesia:dirty_read/1 here, but that is quite
+ %% expensive for reasons explained in rabbit_misc:dirty_read/1.
+ lists:append([ets:lookup(rabbit_exchange, Name) || Name <- Names]).
+
+
+-spec lookup_or_die
+ (name()) -> rabbit_types:exchange() |
+ rabbit_types:channel_exit().
+
+lookup_or_die(Name) ->
+ case lookup(Name) of
+ {ok, X} -> X;
+ {error, not_found} -> rabbit_amqqueue:not_found(Name)
+ end.
+
+-spec list() -> [rabbit_types:exchange()].
+
+list() -> mnesia:dirty_match_object(rabbit_exchange, #exchange{_ = '_'}).
+
+-spec count() -> non_neg_integer().
+
+count() ->
+ mnesia:table_info(rabbit_exchange, size).
+
+-spec list_names() -> [rabbit_exchange:name()].
+
+list_names() -> mnesia:dirty_all_keys(rabbit_exchange).
+
+%% Not dirty_match_object since that would not be transactional when used in a
+%% tx context
+
+-spec list(rabbit_types:vhost()) -> [rabbit_types:exchange()].
+
+list(VHostPath) ->
+ mnesia:async_dirty(
+ fun () ->
+ mnesia:match_object(
+ rabbit_exchange,
+ #exchange{name = rabbit_misc:r(VHostPath, exchange), _ = '_'},
+ read)
+ end).
+
+-spec lookup_scratch(name(), atom()) ->
+ rabbit_types:ok(term()) |
+ rabbit_types:error('not_found').
+
+lookup_scratch(Name, App) ->
+ case lookup(Name) of
+ {ok, #exchange{scratches = undefined}} ->
+ {error, not_found};
+ {ok, #exchange{scratches = Scratches}} ->
+ case orddict:find(App, Scratches) of
+ {ok, Value} -> {ok, Value};
+ error -> {error, not_found}
+ end;
+ {error, not_found} ->
+ {error, not_found}
+ end.
+
+-spec update_scratch(name(), atom(), fun((any()) -> any())) -> 'ok'.
+
+update_scratch(Name, App, Fun) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ update(Name,
+ fun(X = #exchange{scratches = Scratches0}) ->
+ Scratches1 = case Scratches0 of
+ undefined -> orddict:new();
+ _ -> Scratches0
+ end,
+ Scratch = case orddict:find(App, Scratches1) of
+ {ok, S} -> S;
+ error -> undefined
+ end,
+ Scratches2 = orddict:store(
+ App, Fun(Scratch), Scratches1),
+ X#exchange{scratches = Scratches2}
+ end),
+ ok
+ end).
+
+-spec update_decorators(name()) -> 'ok'.
+
+update_decorators(Name) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ case mnesia:wread({rabbit_exchange, Name}) of
+ [X] -> store_ram(X),
+ ok;
+ [] -> ok
+ end
+ end).
+
+-spec update
+ (name(),
+ fun((rabbit_types:exchange()) -> rabbit_types:exchange()))
+ -> not_found | rabbit_types:exchange().
+
+update(Name, Fun) ->
+ case mnesia:wread({rabbit_exchange, Name}) of
+ [X] -> X1 = Fun(X),
+ store(X1);
+ [] -> not_found
+ end.
+
+-spec immutable(rabbit_types:exchange()) -> rabbit_types:exchange().
+
+immutable(X) -> X#exchange{scratches = none,
+ policy = none,
+ decorators = none}.
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS.
+
+map(VHostPath, F) ->
+ %% TODO: there is scope for optimisation here, e.g. using a
+ %% cursor, parallelising the function invocation
+ lists:map(F, list(VHostPath)).
+
+infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items].
+
+i(name, #exchange{name = Name}) -> Name;
+i(type, #exchange{type = Type}) -> Type;
+i(durable, #exchange{durable = Durable}) -> Durable;
+i(auto_delete, #exchange{auto_delete = AutoDelete}) -> AutoDelete;
+i(internal, #exchange{internal = Internal}) -> Internal;
+i(arguments, #exchange{arguments = Arguments}) -> Arguments;
+i(policy, X) -> case rabbit_policy:name(X) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(user_who_performed_action, #exchange{options = Opts}) ->
+ maps:get(user, Opts, ?UNKNOWN_USER);
+i(Item, #exchange{type = Type} = X) ->
+ case (type_to_module(Type)):info(X, [Item]) of
+ [{Item, I}] -> I;
+ [] -> throw({bad_argument, Item})
+ end.
+
+-spec info(rabbit_types:exchange()) -> rabbit_types:infos().
+
+info(X = #exchange{type = Type}) ->
+ infos(?INFO_KEYS, X) ++ (type_to_module(Type)):info(X).
+
+-spec info
+ (rabbit_types:exchange(), rabbit_types:info_keys())
+ -> rabbit_types:infos().
+
+info(X = #exchange{type = _Type}, Items) ->
+ infos(Items, X).
+
+-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+
+info_all(VHostPath) -> map(VHostPath, fun (X) -> info(X) end).
+
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys())
+ -> [rabbit_types:infos()].
+
+info_all(VHostPath, Items) -> map(VHostPath, fun (X) -> info(X, Items) end).
+
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys(),
+ reference(), pid())
+ -> 'ok'.
+
+info_all(VHostPath, Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(X) -> info(X, Items) end, list(VHostPath)).
+
+-spec route(rabbit_types:exchange(), rabbit_types:delivery())
+ -> [rabbit_amqqueue:name()].
+
+route(#exchange{name = #resource{virtual_host = VHost, name = RName} = XName,
+ decorators = Decorators} = X,
+ #delivery{message = #basic_message{routing_keys = RKs}} = Delivery) ->
+ case RName of
+ <<>> ->
+ RKsSorted = lists:usort(RKs),
+ [rabbit_channel:deliver_reply(RK, Delivery) ||
+ RK <- RKsSorted, virtual_reply_queue(RK)],
+ [rabbit_misc:r(VHost, queue, RK) || RK <- RKsSorted,
+ not virtual_reply_queue(RK)];
+ _ ->
+ Decs = rabbit_exchange_decorator:select(route, Decorators),
+ lists:usort(route1(Delivery, Decs, {[X], XName, []}))
+ end.
+
+virtual_reply_queue(<<"amq.rabbitmq.reply-to.", _/binary>>) -> true;
+virtual_reply_queue(_) -> false.
+
+route1(_, _, {[], _, QNames}) ->
+ QNames;
+route1(Delivery, Decorators,
+ {[X = #exchange{type = Type} | WorkList], SeenXs, QNames}) ->
+ ExchangeDests = (type_to_module(Type)):route(X, Delivery),
+ DecorateDests = process_decorators(X, Decorators, Delivery),
+ AlternateDests = process_alternate(X, ExchangeDests),
+ route1(Delivery, Decorators,
+ lists:foldl(fun process_route/2, {WorkList, SeenXs, QNames},
+ AlternateDests ++ DecorateDests ++ ExchangeDests)).
+
+process_alternate(X = #exchange{name = XName}, []) ->
+ case rabbit_policy:get_arg(
+ <<"alternate-exchange">>, <<"alternate-exchange">>, X) of
+ undefined -> [];
+ AName -> [rabbit_misc:r(XName, exchange, AName)]
+ end;
+process_alternate(_X, _Results) ->
+ [].
+
+process_decorators(_, [], _) -> %% optimisation
+ [];
+process_decorators(X, Decorators, Delivery) ->
+ lists:append([Decorator:route(X, Delivery) || Decorator <- Decorators]).
+
+process_route(#resource{kind = exchange} = XName,
+ {_WorkList, XName, _QNames} = Acc) ->
+ Acc;
+process_route(#resource{kind = exchange} = XName,
+ {WorkList, #resource{kind = exchange} = SeenX, QNames}) ->
+ {cons_if_present(XName, WorkList),
+ gb_sets:from_list([SeenX, XName]), QNames};
+process_route(#resource{kind = exchange} = XName,
+ {WorkList, SeenXs, QNames} = Acc) ->
+ case gb_sets:is_element(XName, SeenXs) of
+ true -> Acc;
+ false -> {cons_if_present(XName, WorkList),
+ gb_sets:add_element(XName, SeenXs), QNames}
+ end;
+process_route(#resource{kind = queue} = QName,
+ {WorkList, SeenXs, QNames}) ->
+ {WorkList, SeenXs, [QName | QNames]}.
+
+cons_if_present(XName, L) ->
+ case lookup(XName) of
+ {ok, X} -> [X | L];
+ {error, not_found} -> L
+ end.
+
+call_with_exchange(XName, Fun) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () -> case mnesia:read({rabbit_exchange, XName}) of
+ [] -> rabbit_misc:const({error, not_found});
+ [X] -> Fun(X)
+ end
+ end).
+
+-spec delete
+ (name(), 'true', rabbit_types:username()) ->
+ 'ok'| rabbit_types:error('not_found' | 'in_use');
+ (name(), 'false', rabbit_types:username()) ->
+ 'ok' | rabbit_types:error('not_found').
+
+delete(XName, IfUnused, Username) ->
+ Fun = case IfUnused of
+ true -> fun conditional_delete/2;
+ false -> fun unconditional_delete/2
+ end,
+ try
+ %% guard exchange.declare operations from failing when there's
+ %% a race condition between it and an exchange.delete.
+ %%
+ %% see rabbitmq/rabbitmq-federation#7
+ rabbit_runtime_parameters:set(XName#resource.virtual_host,
+ ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT,
+ XName#resource.name, true, Username),
+ call_with_exchange(
+ XName,
+ fun (X) ->
+ case Fun(X, false) of
+ {deleted, X, Bs, Deletions} ->
+ rabbit_binding:process_deletions(
+ rabbit_binding:add_deletion(
+ XName, {X, deleted, Bs}, Deletions), Username);
+ {error, _InUseOrNotFound} = E ->
+ rabbit_misc:const(E)
+ end
+ end)
+ after
+ rabbit_runtime_parameters:clear(XName#resource.virtual_host,
+ ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT,
+ XName#resource.name, Username)
+ end.
+
+-spec validate_binding
+ (rabbit_types:exchange(), rabbit_types:binding())
+ -> rabbit_types:ok_or_error({'binding_invalid', string(), [any()]}).
+
+validate_binding(X = #exchange{type = XType}, Binding) ->
+ Module = type_to_module(XType),
+ Module:validate_binding(X, Binding).
+
+-spec maybe_auto_delete
+ (rabbit_types:exchange(), boolean())
+ -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}.
+
+maybe_auto_delete(#exchange{auto_delete = false}, _OnlyDurable) ->
+ not_deleted;
+maybe_auto_delete(#exchange{auto_delete = true} = X, OnlyDurable) ->
+ case conditional_delete(X, OnlyDurable) of
+ {error, in_use} -> not_deleted;
+ {deleted, X, [], Deletions} -> {deleted, Deletions}
+ end.
+
+conditional_delete(X = #exchange{name = XName}, OnlyDurable) ->
+ case rabbit_binding:has_for_source(XName) of
+ false -> internal_delete(X, OnlyDurable, false);
+ true -> {error, in_use}
+ end.
+
+unconditional_delete(X, OnlyDurable) ->
+ internal_delete(X, OnlyDurable, true).
+
+internal_delete(X = #exchange{name = XName}, OnlyDurable, RemoveBindingsForSource) ->
+ ok = mnesia:delete({rabbit_exchange, XName}),
+ ok = mnesia:delete({rabbit_exchange_serial, XName}),
+ mnesia:delete({rabbit_durable_exchange, XName}),
+ Bindings = case RemoveBindingsForSource of
+ true -> rabbit_binding:remove_for_source(XName);
+ false -> []
+ end,
+ {deleted, X, Bindings, rabbit_binding:remove_for_destination(
+ XName, OnlyDurable)}.
+
+next_serial(XName) ->
+ Serial = peek_serial(XName, write),
+ ok = mnesia:write(rabbit_exchange_serial,
+ #exchange_serial{name = XName, next = Serial + 1}, write),
+ Serial.
+
+-spec peek_serial(name()) -> pos_integer() | 'undefined'.
+
+peek_serial(XName) -> peek_serial(XName, read).
+
+peek_serial(XName, LockType) ->
+ case mnesia:read(rabbit_exchange_serial, XName, LockType) of
+ [#exchange_serial{next = Serial}] -> Serial;
+ _ -> 1
+ end.
+
+invalid_module(T) ->
+ rabbit_log:warning("Could not find exchange type ~s.~n", [T]),
+ put({xtype_to_module, T}, rabbit_exchange_type_invalid),
+ rabbit_exchange_type_invalid.
+
+%% Used with atoms from records; i.e., the type is expected to exist.
+type_to_module(T) ->
+ case get({xtype_to_module, T}) of
+ undefined ->
+ case rabbit_registry:lookup_module(exchange, T) of
+ {ok, Module} -> put({xtype_to_module, T}, Module),
+ Module;
+ {error, not_found} -> invalid_module(T)
+ end;
+ Module ->
+ Module
+ end.
diff --git a/deps/rabbit/src/rabbit_exchange_decorator.erl b/deps/rabbit/src/rabbit_exchange_decorator.erl
new file mode 100644
index 0000000000..02d0258d3c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_decorator.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_decorator).
+
+-include("rabbit.hrl").
+
+-export([select/2, set/1]).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+%% This is like an exchange type except that:
+%%
+%% 1) It applies to all exchanges as soon as it is installed, therefore
+%% 2) It is not allowed to affect validation, so no validate/1 or
+%% assert_args_equivalence/2
+%%
+%% It's possible in the future we might make decorators
+%% able to manipulate messages as they are published.
+
+-type(tx() :: 'transaction' | 'none').
+-type(serial() :: pos_integer() | tx()).
+
+-callback description() -> [proplists:property()].
+
+%% Should Rabbit ensure that all binding events that are
+%% delivered to an individual exchange can be serialised? (they
+%% might still be delivered out of order, but there'll be a
+%% serial number).
+-callback serialise_events(rabbit_types:exchange()) -> boolean().
+
+%% called after declaration and recovery
+-callback create(tx(), rabbit_types:exchange()) -> 'ok'.
+
+%% called after exchange (auto)deletion.
+-callback delete(tx(), rabbit_types:exchange(), [rabbit_types:binding()]) ->
+ 'ok'.
+
+%% called when the policy attached to this exchange changes.
+-callback policy_changed(rabbit_types:exchange(), rabbit_types:exchange()) ->
+ 'ok'.
+
+%% called after a binding has been added or recovered
+-callback add_binding(serial(), rabbit_types:exchange(),
+ rabbit_types:binding()) -> 'ok'.
+
+%% called after bindings have been deleted.
+-callback remove_bindings(serial(), rabbit_types:exchange(),
+ [rabbit_types:binding()]) -> 'ok'.
+
+%% Allows additional destinations to be added to the routing decision.
+-callback route(rabbit_types:exchange(), rabbit_types:delivery()) ->
+ [rabbit_amqqueue:name() | rabbit_exchange:name()].
+
+%% Whether the decorator wishes to receive callbacks for the exchange
+%% none: no callbacks, noroute: all callbacks except route, all: all callbacks
+-callback active_for(rabbit_types:exchange()) -> 'none' | 'noroute' | 'all'.
+
+%%----------------------------------------------------------------------------
+
+added_to_rabbit_registry(_Type, _ModuleName) ->
+ [maybe_recover(X) || X <- rabbit_exchange:list()],
+ ok.
+removed_from_rabbit_registry(_Type) ->
+ [maybe_recover(X) || X <- rabbit_exchange:list()],
+ ok.
+
+%% select a subset of active decorators
+select(all, {Route, NoRoute}) -> filter(Route ++ NoRoute);
+select(route, {Route, _NoRoute}) -> filter(Route);
+select(raw, {Route, NoRoute}) -> Route ++ NoRoute.
+
+filter(Modules) ->
+ [M || M <- Modules, code:which(M) =/= non_existing].
+
+set(X) ->
+ Decs = lists:foldl(fun (D, {Route, NoRoute}) ->
+ ActiveFor = D:active_for(X),
+ {cons_if_eq(all, ActiveFor, D, Route),
+ cons_if_eq(noroute, ActiveFor, D, NoRoute)}
+ end, {[], []}, list()),
+ X#exchange{decorators = Decs}.
+
+list() -> [M || {_, M} <- rabbit_registry:lookup_all(exchange_decorator)].
+
+cons_if_eq(Select, Select, Item, List) -> [Item | List];
+cons_if_eq(_Select, _Other, _Item, List) -> List.
+
+maybe_recover(X = #exchange{name = Name,
+ decorators = Decs}) ->
+ #exchange{decorators = Decs1} = set(X),
+ Old = lists:sort(select(all, Decs)),
+ New = lists:sort(select(all, Decs1)),
+ case New of
+ Old -> ok;
+ _ -> %% TODO create a tx here for non-federation decorators
+ [M:create(none, X) || M <- New -- Old],
+ rabbit_exchange:update_decorators(Name)
+ end.
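A conceptual sketch of how set/1 and select/2 cooperate, using two hypothetical decorator modules that are not part of the broker:

    %% Suppose my_dec_a:active_for(X) returns all and my_dec_b:active_for(X)
    %% returns noroute. set/1 stores {[my_dec_a], [my_dec_b]} in
    %% #exchange.decorators, after which:
    %%   select(route, {[my_dec_a], [my_dec_b]}) -> [my_dec_a]
    %%   select(all,   {[my_dec_a], [my_dec_b]}) -> [my_dec_a, my_dec_b]
    %% filter/1 drops modules whose code can no longer be found, so a stale
    %% registration cannot crash routing.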
diff --git a/deps/rabbit/src/rabbit_exchange_parameters.erl b/deps/rabbit/src/rabbit_exchange_parameters.erl
new file mode 100644
index 0000000000..f9de648cfa
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_parameters.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_parameters).
+
+-behaviour(rabbit_runtime_parameter).
+
+-include("rabbit.hrl").
+
+-export([register/0]).
+-export([validate/5, notify/5, notify_clear/4]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange parameters"},
+ {mfa, {rabbit_exchange_parameters, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+register() ->
+ rabbit_registry:register(runtime_parameter,
+ ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, ?MODULE),
+ %% ensure there are no leftovers from before node restart/crash
+ rabbit_runtime_parameters:clear_component(
+ ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT,
+ ?INTERNAL_USER),
+ ok.
+
+validate(_VHost, ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, _Name, _Term, _User) ->
+ ok.
+
+notify(_VHost, ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, _Name, _Term, _Username) ->
+ ok.
+
+notify_clear(_VHost, ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, _Name, _Username) ->
+ ok.
diff --git a/deps/rabbit/src/rabbit_exchange_type_direct.erl b/deps/rabbit/src/rabbit_exchange_type_direct.erl
new file mode 100644
index 0000000000..3f4350e7b0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_type_direct.erl
@@ -0,0 +1,46 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_direct).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type direct"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"direct">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description, <<"AMQP direct exchange, as per the AMQP specification">>}].
+
+serialise_events() -> false.
+
+route(#exchange{name = Name},
+ #delivery{message = #basic_message{routing_keys = Routes}}) ->
+ rabbit_router:match_routing_key(Name, Routes).
+
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/deps/rabbit/src/rabbit_exchange_type_fanout.erl b/deps/rabbit/src/rabbit_exchange_type_fanout.erl
new file mode 100644
index 0000000000..a8778cf0c7
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_type_fanout.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_fanout).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type fanout"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"fanout">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description, <<"AMQP fanout exchange, as per the AMQP specification">>}].
+
+serialise_events() -> false.
+
+route(#exchange{name = Name}, _Delivery) ->
+ rabbit_router:match_routing_key(Name, ['_']).
+
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/deps/rabbit/src/rabbit_exchange_type_headers.erl b/deps/rabbit/src/rabbit_exchange_type_headers.erl
new file mode 100644
index 0000000000..e40195de7a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_type_headers.erl
@@ -0,0 +1,136 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_headers).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type headers"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"headers">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description, <<"AMQP headers exchange, as per the AMQP specification">>}].
+
+serialise_events() -> false.
+
+route(#exchange{name = Name},
+ #delivery{message = #basic_message{content = Content}}) ->
+ Headers = case (Content#content.properties)#'P_basic'.headers of
+ undefined -> [];
+ H -> rabbit_misc:sort_field_table(H)
+ end,
+ rabbit_router:match_bindings(
+ Name, fun (#binding{args = Spec}) -> headers_match(Spec, Headers) end).
+
+validate_binding(_X, #binding{args = Args}) ->
+ case rabbit_misc:table_lookup(Args, <<"x-match">>) of
+ {longstr, <<"all">>} -> ok;
+ {longstr, <<"any">>} -> ok;
+ {longstr, Other} -> {error,
+ {binding_invalid,
+ "Invalid x-match field value ~p; "
+ "expected all or any", [Other]}};
+ {Type, Other} -> {error,
+ {binding_invalid,
+ "Invalid x-match field type ~p (value ~p); "
+ "expected longstr", [Type, Other]}};
+ undefined -> ok %% [0]
+ end.
+%% [0] spec is vague on whether it can be omitted but in practice it's
+%% useful to allow people to do this
+
+parse_x_match({longstr, <<"all">>}) -> all;
+parse_x_match({longstr, <<"any">>}) -> any;
+parse_x_match(_) -> all. %% legacy; we didn't validate
+
+%% Horrendous matching algorithm. Depends for its merge-like
+%% (linear-time) behaviour on the lists:keysort
+%% (rabbit_misc:sort_field_table) that route/1 and
+%% rabbit_binding:{add,remove}/2 do.
+%%
+%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+%% In other words: REQUIRES BOTH PATTERN AND DATA TO BE SORTED ASCENDING BY KEY.
+%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+%%
+
+-spec headers_match
+ (rabbit_framing:amqp_table(), rabbit_framing:amqp_table()) ->
+ boolean().
+
+headers_match(Args, Data) ->
+ MK = parse_x_match(rabbit_misc:table_lookup(Args, <<"x-match">>)),
+ headers_match(Args, Data, true, false, MK).
+
+% A bit less horrendous algorithm :)
+headers_match(_, _, false, _, all) -> false;
+headers_match(_, _, _, true, any) -> true;
+
+% No more bindings, return current state
+headers_match([], _Data, AllMatch, _AnyMatch, all) -> AllMatch;
+headers_match([], _Data, _AllMatch, AnyMatch, any) -> AnyMatch;
+
+% Skip binding argument keys that start with x-
+headers_match([{<<"x-", _/binary>>, _PT, _PV} | PRest], Data,
+ AllMatch, AnyMatch, MatchKind) ->
+ headers_match(PRest, Data, AllMatch, AnyMatch, MatchKind);
+
+% No more data, but still bindings, false with all
+headers_match(_Pattern, [], _AllMatch, AnyMatch, MatchKind) ->
+ headers_match([], [], false, AnyMatch, MatchKind);
+
+% Data key header not in binding, go next data
+headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest],
+ AllMatch, AnyMatch, MatchKind) when PK > DK ->
+ headers_match(Pattern, DRest, AllMatch, AnyMatch, MatchKind);
+
+% Binding key header not in data, false with all, go next binding
+headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _],
+ _AllMatch, AnyMatch, MatchKind) when PK < DK ->
+ headers_match(PRest, Data, false, AnyMatch, MatchKind);
+
+%% It's not properly specified, but a "no value" in a
+%% pattern field is supposed to mean simple presence of
+%% the corresponding data field. I've interpreted that to
+%% mean a type of "void" for the pattern field.
+headers_match([{PK, void, _PV} | PRest], [{DK, _DT, _DV} | DRest],
+ AllMatch, _AnyMatch, MatchKind) when PK == DK ->
+ headers_match(PRest, DRest, AllMatch, true, MatchKind);
+
+% Complete match, true with any, go next
+headers_match([{PK, _PT, PV} | PRest], [{DK, _DT, DV} | DRest],
+ AllMatch, _AnyMatch, MatchKind) when PK == DK andalso PV == DV ->
+ headers_match(PRest, DRest, AllMatch, true, MatchKind);
+
+% Value does not match, false with all, go next
+headers_match([{PK, _PT, _PV} | PRest], [{DK, _DT, _DV} | DRest],
+ _AllMatch, AnyMatch, MatchKind) when PK == DK ->
+ headers_match(PRest, DRest, false, AnyMatch, MatchKind).
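A worked example of the matcher with hypothetical binding arguments and message headers, both shown already sorted by key as the algorithm requires:

    Args = [{<<"format">>,  longstr, <<"pdf">>},
            {<<"type">>,    longstr, <<"report">>},
            {<<"x-match">>, longstr, <<"all">>}],
    Data = [{<<"format">>, longstr, <<"pdf">>},
            {<<"size">>,   long,    42},
            {<<"type">>,   longstr, <<"report">>}],
    headers_match(Args, Data).
    %% => true: every non-"x-" binding key is present in the headers with
    %% an equal value; the extra "size" header is simply skipped. With
    %% x-match set to any, a single matching pair would have sufficed.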
+
+
+validate(_X) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/deps/rabbit/src/rabbit_exchange_type_invalid.erl b/deps/rabbit/src/rabbit_exchange_type_invalid.erl
new file mode 100644
index 0000000000..3fa27d28e9
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_type_invalid.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_invalid).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description,
+ <<"Dummy exchange type, to be used when the intended one is not found.">>
+ }].
+
+serialise_events() -> false.
+
+-spec route(rabbit_types:exchange(), rabbit_types:delivery()) -> no_return().
+
+route(#exchange{name = Name, type = Type}, _) ->
+ rabbit_misc:protocol_error(
+ precondition_failed,
+ "Cannot route message through ~s: exchange type ~s not found",
+ [rabbit_misc:rs(Name), Type]).
+
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/deps/rabbit/src/rabbit_exchange_type_topic.erl b/deps/rabbit/src/rabbit_exchange_type_topic.erl
new file mode 100644
index 0000000000..38b05895f2
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_type_topic.erl
@@ -0,0 +1,266 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_topic).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type topic"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"topic">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%%----------------------------------------------------------------------------
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description, <<"AMQP topic exchange, as per the AMQP specification">>}].
+
+serialise_events() -> false.
+
+%% NB: This may return duplicate results in some situations (that's ok)
+route(#exchange{name = X},
+ #delivery{message = #basic_message{routing_keys = Routes}}) ->
+ lists:append([begin
+ Words = split_topic_key(RKey),
+ mnesia:async_dirty(fun trie_match/2, [X, Words])
+ end || RKey <- Routes]).
+
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+
+delete(transaction, #exchange{name = X}, _Bs) ->
+ trie_remove_all_nodes(X),
+ trie_remove_all_edges(X),
+ trie_remove_all_bindings(X),
+ ok;
+delete(none, _Exchange, _Bs) ->
+ ok.
+
+policy_changed(_X1, _X2) -> ok.
+
+add_binding(transaction, _Exchange, Binding) ->
+ internal_add_binding(Binding);
+add_binding(none, _Exchange, _Binding) ->
+ ok.
+
+remove_bindings(transaction, _X, Bs) ->
+ %% See rabbit_binding:lock_route_tables for the rationale for
+ %% taking table locks.
+ case Bs of
+ [_] -> ok;
+ _ -> [mnesia:lock({table, T}, write) ||
+ T <- [rabbit_topic_trie_node,
+ rabbit_topic_trie_edge,
+ rabbit_topic_trie_binding]]
+ end,
+ [case follow_down_get_path(X, split_topic_key(K)) of
+ {ok, Path = [{FinalNode, _} | _]} ->
+ trie_remove_binding(X, FinalNode, D, Args),
+ remove_path_if_empty(X, Path);
+ {error, _Node, _RestW} ->
+ %% We're trying to remove a binding that no longer exists.
+ %% That's unexpected, but shouldn't be a problem.
+ ok
+ end || #binding{source = X, key = K, destination = D, args = Args} <- Bs],
+ ok;
+remove_bindings(none, _X, _Bs) ->
+ ok.
+
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
+
+%%----------------------------------------------------------------------------
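+
+%% Overview of the trie storage used below: each exchange has its own trie
+%% persisted in Mnesia. rabbit_topic_trie_node holds the nodes together with
+%% their edge and binding counts, rabbit_topic_trie_edge holds the edges
+%% (each labelled with one word of a binding key), and
+%% rabbit_topic_trie_binding attaches destinations to the node reached by
+%% following a binding key word by word from the root.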
+
+internal_add_binding(#binding{source = X, key = K, destination = D,
+ args = Args}) ->
+ FinalNode = follow_down_create(X, split_topic_key(K)),
+ trie_add_binding(X, FinalNode, D, Args),
+ ok.
+
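+%% Match the words of a routing key against the trie. Per AMQP topic
+%% exchange semantics, a binding word of "*" matches exactly one routing
+%% key word and "#" matches zero or more words (handled by
+%% trie_match_skip_any/4 below).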
+trie_match(X, Words) ->
+ trie_match(X, root, Words, []).
+
+trie_match(X, Node, [], ResAcc) ->
+ trie_match_part(X, Node, "#", fun trie_match_skip_any/4, [],
+ trie_bindings(X, Node) ++ ResAcc);
+trie_match(X, Node, [W | RestW] = Words, ResAcc) ->
+ lists:foldl(fun ({WArg, MatchFun, RestWArg}, Acc) ->
+ trie_match_part(X, Node, WArg, MatchFun, RestWArg, Acc)
+ end, ResAcc, [{W, fun trie_match/4, RestW},
+ {"*", fun trie_match/4, RestW},
+ {"#", fun trie_match_skip_any/4, Words}]).
+
+trie_match_part(X, Node, Search, MatchFun, RestW, ResAcc) ->
+ case trie_child(X, Node, Search) of
+ {ok, NextNode} -> MatchFun(X, NextNode, RestW, ResAcc);
+ error -> ResAcc
+ end.
+
+trie_match_skip_any(X, Node, [], ResAcc) ->
+ trie_match(X, Node, [], ResAcc);
+trie_match_skip_any(X, Node, [_ | RestW] = Words, ResAcc) ->
+ trie_match_skip_any(X, Node, RestW,
+ trie_match(X, Node, Words, ResAcc)).
+
+follow_down_create(X, Words) ->
+ case follow_down_last_node(X, Words) of
+ {ok, FinalNode} -> FinalNode;
+ {error, Node, RestW} -> lists:foldl(
+ fun (W, CurNode) ->
+ NewNode = new_node_id(),
+ trie_add_edge(X, CurNode, NewNode, W),
+ NewNode
+ end, Node, RestW)
+ end.
+
+follow_down_last_node(X, Words) ->
+ follow_down(X, fun (_, Node, _) -> Node end, root, Words).
+
+follow_down_get_path(X, Words) ->
+ follow_down(X, fun (W, Node, PathAcc) -> [{Node, W} | PathAcc] end,
+ [{root, none}], Words).
+
+follow_down(X, AccFun, Acc0, Words) ->
+ follow_down(X, root, AccFun, Acc0, Words).
+
+follow_down(_X, _CurNode, _AccFun, Acc, []) ->
+ {ok, Acc};
+follow_down(X, CurNode, AccFun, Acc, Words = [W | RestW]) ->
+ case trie_child(X, CurNode, W) of
+ {ok, NextNode} -> follow_down(X, NextNode, AccFun,
+ AccFun(W, NextNode, Acc), RestW);
+ error -> {error, Acc, Words}
+ end.
+
+remove_path_if_empty(_, [{root, none}]) ->
+ ok;
+remove_path_if_empty(X, [{Node, W} | [{Parent, _} | _] = RestPath]) ->
+ case mnesia:read(rabbit_topic_trie_node,
+ #trie_node{exchange_name = X, node_id = Node}, write) of
+ [] -> trie_remove_edge(X, Parent, Node, W),
+ remove_path_if_empty(X, RestPath);
+ _ -> ok
+ end.
+
+trie_child(X, Node, Word) ->
+ case mnesia:read({rabbit_topic_trie_edge,
+ #trie_edge{exchange_name = X,
+ node_id = Node,
+ word = Word}}) of
+ [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode};
+ [] -> error
+ end.
+
+trie_bindings(X, Node) ->
+ MatchHead = #topic_trie_binding{
+ trie_binding = #trie_binding{exchange_name = X,
+ node_id = Node,
+ destination = '$1',
+ arguments = '_'}},
+ mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$1']}]).
+
+trie_update_node_counts(X, Node, Field, Delta) ->
+ E = case mnesia:read(rabbit_topic_trie_node,
+ #trie_node{exchange_name = X,
+ node_id = Node}, write) of
+ [] -> #topic_trie_node{trie_node = #trie_node{
+ exchange_name = X,
+ node_id = Node},
+ edge_count = 0,
+ binding_count = 0};
+ [E0] -> E0
+ end,
+ case setelement(Field, E, element(Field, E) + Delta) of
+ #topic_trie_node{edge_count = 0, binding_count = 0} ->
+ ok = mnesia:delete_object(rabbit_topic_trie_node, E, write);
+ EN ->
+ ok = mnesia:write(rabbit_topic_trie_node, EN, write)
+ end.
+
+trie_add_edge(X, FromNode, ToNode, W) ->
+ trie_update_node_counts(X, FromNode, #topic_trie_node.edge_count, +1),
+ trie_edge_op(X, FromNode, ToNode, W, fun mnesia:write/3).
+
+trie_remove_edge(X, FromNode, ToNode, W) ->
+ trie_update_node_counts(X, FromNode, #topic_trie_node.edge_count, -1),
+ trie_edge_op(X, FromNode, ToNode, W, fun mnesia:delete_object/3).
+
+trie_edge_op(X, FromNode, ToNode, W, Op) ->
+ ok = Op(rabbit_topic_trie_edge,
+ #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X,
+ node_id = FromNode,
+ word = W},
+ node_id = ToNode},
+ write).
+
+trie_add_binding(X, Node, D, Args) ->
+ trie_update_node_counts(X, Node, #topic_trie_node.binding_count, +1),
+ trie_binding_op(X, Node, D, Args, fun mnesia:write/3).
+
+trie_remove_binding(X, Node, D, Args) ->
+ trie_update_node_counts(X, Node, #topic_trie_node.binding_count, -1),
+ trie_binding_op(X, Node, D, Args, fun mnesia:delete_object/3).
+
+trie_binding_op(X, Node, D, Args, Op) ->
+ ok = Op(rabbit_topic_trie_binding,
+ #topic_trie_binding{
+ trie_binding = #trie_binding{exchange_name = X,
+ node_id = Node,
+ destination = D,
+ arguments = Args}},
+ write).
+
+trie_remove_all_nodes(X) ->
+ remove_all(rabbit_topic_trie_node,
+ #topic_trie_node{trie_node = #trie_node{exchange_name = X,
+ _ = '_'},
+ _ = '_'}).
+
+trie_remove_all_edges(X) ->
+ remove_all(rabbit_topic_trie_edge,
+ #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X,
+ _ = '_'},
+ _ = '_'}).
+
+trie_remove_all_bindings(X) ->
+ remove_all(rabbit_topic_trie_binding,
+ #topic_trie_binding{
+ trie_binding = #trie_binding{exchange_name = X, _ = '_'},
+ _ = '_'}).
+
+remove_all(Table, Pattern) ->
+ lists:foreach(fun (R) -> mnesia:delete_object(Table, R, write) end,
+ mnesia:match_object(Table, Pattern, write)).
+
+new_node_id() ->
+ rabbit_guid:gen().
+
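+%% Split a binary routing key on "." into a list of words, each word being a
+%% string, e.g. <<"a.b.c">> becomes ["a", "b", "c"] and <<>> becomes [].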
+split_topic_key(Key) ->
+ split_topic_key(Key, [], []).
+
+split_topic_key(<<>>, [], []) ->
+ [];
+split_topic_key(<<>>, RevWordAcc, RevResAcc) ->
+ lists:reverse([lists:reverse(RevWordAcc) | RevResAcc]);
+split_topic_key(<<$., Rest/binary>>, RevWordAcc, RevResAcc) ->
+ split_topic_key(Rest, [], [lists:reverse(RevWordAcc) | RevResAcc]);
+split_topic_key(<<C:8, Rest/binary>>, RevWordAcc, RevResAcc) ->
+ split_topic_key(Rest, [C | RevWordAcc], RevResAcc).
diff --git a/deps/rabbit/src/rabbit_feature_flags.erl b/deps/rabbit/src/rabbit_feature_flags.erl
new file mode 100644
index 0000000000..921ec9ab53
--- /dev/null
+++ b/deps/rabbit/src/rabbit_feature_flags.erl
@@ -0,0 +1,2470 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @author The RabbitMQ team
+%% @copyright 2018-2020 VMware, Inc. or its affiliates.
+%%
+%% @doc
+%% This module offers a framework to declare capabilities a RabbitMQ node
+%% supports and therefore a way to determine if multiple RabbitMQ nodes in
+%% a cluster are compatible and can work together.
+%%
+%% == What a feature flag is ==
+%%
+%% A <strong>feature flag</strong> is a name and several properties given
+%% to a change in RabbitMQ which impacts its communication with other
+%% RabbitMQ nodes. This kind of change can be:
+%% <ul>
+%% <li>an update to an Erlang record</li>
+%% <li>a modification to a replicated Mnesia table schema</li>
+%% <li>a modification to Erlang messages exchanged between Erlang processes
+%% which might run on remote nodes</li>
+%% </ul>
+%%
+%% A feature flag is qualified by:
+%% <ul>
+%% <li>a <strong>name</strong></li>
+%% <li>a <strong>description</strong> (optional)</li>
+%% <li>a list of other <strong>feature flags this feature flag depends on
+%% </strong> (optional). This can be useful when the change builds on
+%% top of a previous change. For instance, it expands a record which was
+%% already modified by a previous feature flag.</li>
+%% <li>a <strong>migration function</strong> (optional). If provided, this
+%% function is called when the feature flag is enabled. It is responsible
+%% for doing all the data conversion, if any, and confirming the feature
+%% flag can be enabled.</li>
+%% <li>a level of stability (stable or experimental). For now, this is only
+%% informational. But it might be used for specific purposes in the
+%% future.</li>
+%% </ul>
+%%
+%% == How to declare a feature flag ==
+%%
+%% To define a new feature flag, you need to use the
+%% `rabbit_feature_flag()' module attribute:
+%%
+%% ```
+%% -rabbit_feature_flag(FeatureFlag).
+%% '''
+%%
+%% `FeatureFlag' is a {@type feature_flag_modattr()}.
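+%%
+%% For instance, a minimal sketch of a declaration (the flag name and
+%% properties below are purely illustrative):
+%%
+%% ```
+%% -rabbit_feature_flag(
+%%    {my_feature_flag,
+%%     #{desc          => "An illustrative feature flag",
+%%       stability     => stable,
+%%       depends_on    => [],
+%%       migration_fun => {my_module, my_migration}
+%%      }}).
+%% '''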
+%%
+%% == How to enable a feature flag ==
+%%
+%% To enable a supported feature flag, you have the following solutions:
+%%
+%% <ul>
+%% <li>Using this module API:
+%% ```
+%% rabbit_feature_flags:enable(FeatureFlagName).
+%% '''
+%% </li>
+%% <li>Using the `rabbitmqctl' CLI:
+%% ```
+%% rabbitmqctl enable_feature_flag "$feature_flag_name"
+%% '''
+%% </li>
+%% </ul>
+%%
+%% == How to disable a feature flag ==
+%%
+%% Once enabled, there is <strong>currently no way to disable</strong> a
+%% feature flag.
+
+-module(rabbit_feature_flags).
+
+-export([list/0,
+ list/1,
+ list/2,
+ enable/1,
+ enable_all/0,
+ disable/1,
+ disable_all/0,
+ is_supported/1,
+ is_supported/2,
+ is_supported_locally/1,
+ is_supported_remotely/1,
+ is_supported_remotely/2,
+ is_supported_remotely/3,
+ is_enabled/1,
+ is_enabled/2,
+ is_disabled/1,
+ is_disabled/2,
+ info/0,
+ info/1,
+ init/0,
+ get_state/1,
+ get_stability/1,
+ check_node_compatibility/1,
+ check_node_compatibility/2,
+ is_node_compatible/1,
+ is_node_compatible/2,
+ sync_feature_flags_with_cluster/2,
+ sync_feature_flags_with_cluster/3,
+ refresh_feature_flags_after_app_load/1,
+ enabled_feature_flags_list_file/0
+ ]).
+
+%% RabbitMQ internal use only.
+-export([initialize_registry/0,
+ initialize_registry/1,
+ mark_as_enabled_locally/2,
+ remote_nodes/0,
+ running_remote_nodes/0,
+ does_node_support/3,
+ merge_feature_flags_from_unknown_apps/1,
+ do_sync_feature_flags_with_node/1]).
+
+-ifdef(TEST).
+-export([inject_test_feature_flags/1,
+ initialize_registry/3,
+ query_supported_feature_flags/0,
+ mark_as_enabled_remotely/2,
+ mark_as_enabled_remotely/4,
+ registry_loading_lock/0]).
+-endif.
+
+%% Default timeout for operations on remote nodes.
+-define(TIMEOUT, 60000).
+
+-define(FF_REGISTRY_LOADING_LOCK, {feature_flags_registry_loading, self()}).
+-define(FF_STATE_CHANGE_LOCK, {feature_flags_state_change, self()}).
+
+-type feature_flag_modattr() :: {feature_name(),
+ feature_props()}.
+%% The value of a `-rabbit_feature_flag()' module attribute used to
+%% declare a new feature flag.
+
+-type feature_name() :: atom().
+%% The feature flag's name. It is used in many places to identify a
+%% specific feature flag. In particular, this is how an end-user (or
+%% the CLI) can enable a feature flag. This is also the only bit which
+%% is persisted so a node remembers which feature flags are enabled.
+
+-type feature_props() :: #{desc => string(),
+ doc_url => string(),
+ stability => stability(),
+ depends_on => [feature_name()],
+ migration_fun => migration_fun_name()}.
+%% The feature flag properties.
+%%
+%% All properties are optional.
+%%
+%% The properties are:
+%% <ul>
+%% <li>`desc': a description of the feature flag</li>
+%% <li>`doc_url': a URL pointing to more documentation about the feature
+%% flag</li>
+%% <li>`stability': the level of stability</li>
+%% <li>`depends_on': a list of feature flag names which must be enabled
+%% before this one</li>
+%% <li>`migration_fun': a migration function specified by its module and
+%% function names</li>
+%% </ul>
+%%
+%% Note that the `migration_fun' is a {@type migration_fun_name()},
+%% not a {@type migration_fun()}. However, the function signature
+%% must conform to the {@type migration_fun()} signature. The reason
+%% is that we must be able to represent it as an Erlang term when
+%% we regenerate the registry module source code (using {@link
+%% erl_syntax:abstract/1}).
+
+-type feature_flags() :: #{feature_name() => feature_props_extended()}.
+%% The feature flags map as returned or accepted by several functions in
+%% this module. In particular, this is what the {@link list/0} function
+%% returns.
+
+-type feature_props_extended() :: #{desc => string(),
+ doc_url => string(),
+ stability => stability(),
+ migration_fun => migration_fun_name(),
+ depends_on => [feature_name()],
+ provided_by => atom()}.
+%% The feature flag properties, once expanded by this module when feature
+%% flags are discovered.
+%%
+%% The new properties compared to {@type feature_props()} are:
+%% <ul>
+%% <li>`provided_by': the name of the application providing the feature flag</li>
+%% </ul>
+
+-type feature_state() :: boolean() | state_changing.
+%% The state of the feature flag: enabled if `true', disabled if `false'
+%% or `state_changing'.
+
+-type feature_states() :: #{feature_name() => feature_state()}.
+
+-type stability() :: stable | experimental.
+%% The level of stability of a feature flag. Currently, only informational.
+
+-type migration_fun_name() :: {Module :: atom(), Function :: atom()}.
+%% The name of the module and function to call when changing the state of
+%% the feature flag.
+
+-type migration_fun() :: fun((feature_name(),
+ feature_props_extended(),
+ migration_fun_context())
+ -> ok | {error, any()} | % context = enable
+ boolean() | undefined). % context = is_enabled
+%% The migration function signature.
+%%
+%% It is called with context `enable' when a feature flag is being enabled.
+%% The function is responsible for this feature-flag-specific verification
+%% and data conversion. It returns `ok' if RabbitMQ can mark the feature
+%% flag as enabled an continue with the next one, if any. Otherwise, it
+%% returns `{error, any()}' if there is an error and the feature flag should
+%% remain disabled. The function must be idempotent: if the feature flag is
+%% already enabled on another node and the local node is running this function
+%% again because it is syncing its feature flags state, it should succeed.
+%%
+%% It is called with the context `is_enabled' to check if a feature flag
+%% is actually enabled. It is useful on RabbitMQ startup, just in case
+%% the previous instance failed to write the feature flags list file.
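+%%
+%% A minimal sketch of such a function (the module and function names are
+%% illustrative):
+%%
+%% ```
+%% my_migration(_FeatureName, _FeatureProps, enable)     -> ok;
+%% my_migration(_FeatureName, _FeatureProps, is_enabled) -> undefined.
+%% '''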
+
+-type migration_fun_context() :: enable | is_enabled.
+
+-type registry_vsn() :: term().
+
+-export_type([feature_flag_modattr/0,
+ feature_props/0,
+ feature_name/0,
+ feature_flags/0,
+ feature_props_extended/0,
+ feature_state/0,
+ feature_states/0,
+ stability/0,
+ migration_fun_name/0,
+ migration_fun/0,
+ migration_fun_context/0]).
+
+-on_load(on_load/0).
+
+-spec list() -> feature_flags().
+%% @doc
+%% Lists all supported feature flags.
+%%
+%% @returns A map of all supported feature flags.
+
+list() -> list(all).
+
+-spec list(Which :: all | enabled | disabled) -> feature_flags().
+%% @doc
+%% Lists all, enabled or disabled feature flags, depending on the argument.
+%%
+%% @param Which The group of feature flags to return: `all', `enabled' or
+%% `disabled'.
+%% @returns A map of selected feature flags.
+
+list(all) -> rabbit_ff_registry:list(all);
+list(enabled) -> rabbit_ff_registry:list(enabled);
+list(disabled) -> maps:filter(
+ fun(FeatureName, _) -> is_disabled(FeatureName) end,
+ list(all)).
+
+-spec list(all | enabled | disabled, stability()) -> feature_flags().
+%% @doc
+%% Lists all, enabled or disabled feature flags, depending on the first
+%% argument, only keeping those having the specified stability.
+%%
+%% @param Which The group of feature flags to return: `all', `enabled' or
+%% `disabled'.
+%% @param Stability The level of stability used to filter the map of feature
+%% flags.
+%% @returns A map of selected feature flags.
+
+list(Which, Stability)
+ when Stability =:= stable orelse Stability =:= experimental ->
+ maps:filter(fun(_, FeatureProps) ->
+ Stability =:= get_stability(FeatureProps)
+ end, list(Which)).
+
+-spec enable(feature_name() | [feature_name()]) -> ok |
+ {error, Reason :: any()}.
+%% @doc
+%% Enables the specified feature flag or set of feature flags.
+%%
+%% @param FeatureName The name or the list of names of feature flags to
+%% enable.
+%% @returns `ok' if the feature flags (and all the feature flags they
+%% depend on) were successfully enabled, or `{error, Reason}' if one
+%% feature flag could not be enabled (subsequent feature flags in the
+%% dependency tree are left unchanged).
+
+enable(FeatureName) when is_atom(FeatureName) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: REQUEST TO ENABLE",
+ [FeatureName]),
+ case is_enabled(FeatureName) of
+ true ->
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: already enabled",
+ [FeatureName]),
+ ok;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: not enabled, check if supported by cluster",
+ [FeatureName]),
+ %% The feature flag must be supported locally and remotely
+ %% (i.e. by all members of the cluster).
+ case is_supported(FeatureName) of
+ true ->
+ rabbit_log_feature_flags:info(
+ "Feature flag `~s`: supported, attempt to enable...",
+ [FeatureName]),
+ do_enable(FeatureName);
+ false ->
+ rabbit_log_feature_flags:error(
+ "Feature flag `~s`: not supported",
+ [FeatureName]),
+ {error, unsupported}
+ end
+ end;
+enable(FeatureNames) when is_list(FeatureNames) ->
+ with_feature_flags(FeatureNames, fun enable/1).
+
+-spec enable_all() -> ok | {error, any()}.
+%% @doc
+%% Enables all supported feature flags.
+%%
+%% @returns `ok' if the feature flags were successfully enabled,
+%% or `{error, Reason}' if one feature flag could not be enabled
+%% (subsequent feature flags in the dependency tree are left
+%% unchanged).
+
+enable_all() ->
+ with_feature_flags(maps:keys(list(all)), fun enable/1).
+
+-spec disable(feature_name() | [feature_name()]) -> ok | {error, any()}.
+%% @doc
+%% Disables the specified feature flag or set of feature flags.
+%%
+%% @param FeatureName The name or the list of names of feature flags to
+%% disable.
+%% @returns `ok' if the feature flags (and all the feature flags they
+%% depend on) were successfully disabled, or `{error, Reason}' if one
+%% feature flag could not be disabled (subsequent feature flags in the
+%% dependency tree are left unchanged).
+
+disable(FeatureName) when is_atom(FeatureName) ->
+ {error, unsupported};
+disable(FeatureNames) when is_list(FeatureNames) ->
+ with_feature_flags(FeatureNames, fun disable/1).
+
+-spec disable_all() -> ok | {error, any()}.
+%% @doc
+%% Disables all supported feature flags.
+%%
+%% @returns `ok' if the feature flags were successfully disabled,
+%% or `{error, Reason}' if one feature flag could not be disabled
+%% (subsequent feature flags in the dependency tree are left
+%% unchanged).
+
+disable_all() ->
+ with_feature_flags(maps:keys(list(all)), fun disable/1).
+
+-spec with_feature_flags([feature_name()],
+ fun((feature_name()) -> ok | {error, any()})) ->
+ ok | {error, any()}.
+%% @private
+
+with_feature_flags([FeatureName | Rest], Fun) ->
+ case Fun(FeatureName) of
+ ok -> with_feature_flags(Rest, Fun);
+ Error -> Error
+ end;
+with_feature_flags([], _) ->
+ ok.
+
+-spec is_supported(feature_name() | [feature_name()]) -> boolean().
+%% @doc
+%% Returns if a single feature flag or a set of feature flags is
+%% supported by the entire cluster.
+%%
+%% This is the same as calling both {@link is_supported_locally/1} and
+%% {@link is_supported_remotely/1} with a logical AND.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if the set of feature flags is entirely supported, or
+%% `false' if one of them is not or the RPC timed out.
+
+is_supported(FeatureNames) ->
+ is_supported_locally(FeatureNames) andalso
+ is_supported_remotely(FeatureNames).
+
+-spec is_supported(feature_name() | [feature_name()], timeout()) ->
+ boolean().
+%% @doc
+%% Returns if a single feature flag or a set of feature flags is
+%% supported by the entire cluster.
+%%
+%% This is the same as calling both {@link is_supported_locally/1} and
+%% {@link is_supported_remotely/2} with a logical AND.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @param Timeout Time in milliseconds after which the RPC gives up.
+%% @returns `true' if the set of feature flags is entirely supported, or
+%% `false' if one of them is not or the RPC timed out.
+
+is_supported(FeatureNames, Timeout) ->
+ is_supported_locally(FeatureNames) andalso
+ is_supported_remotely(FeatureNames, Timeout).
+
+-spec is_supported_locally(feature_name() | [feature_name()]) -> boolean().
+%% @doc
+%% Returns if a single feature flag or a set of feature flags is
+%% supported by the local node.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if the set of feature flags is entirely supported, or
+%% `false' if one of them is not.
+
+is_supported_locally(FeatureName) when is_atom(FeatureName) ->
+ rabbit_ff_registry:is_supported(FeatureName);
+is_supported_locally(FeatureNames) when is_list(FeatureNames) ->
+ lists:all(fun(F) -> rabbit_ff_registry:is_supported(F) end, FeatureNames).
+
+-spec is_supported_remotely(feature_name() | [feature_name()]) -> boolean().
+%% @doc
+%% Returns if a single feature flag or a set of feature flags is
+%% supported by all remote nodes.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if the set of feature flags is entirely supported, or
+%% `false' if one of them is not or the RPC timed out.
+
+is_supported_remotely(FeatureNames) ->
+ is_supported_remotely(FeatureNames, ?TIMEOUT).
+
+-spec is_supported_remotely(feature_name() | [feature_name()], timeout()) -> boolean().
+%% @doc
+%% Returns if a single feature flag or a set of feature flags is
+%% supported by all remote nodes.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @param Timeout Time in milliseconds after which the RPC gives up.
+%% @returns `true' if the set of feature flags is entirely supported, or
+%% `false' if one of them is not or the RPC timed out.
+
+is_supported_remotely(FeatureName, Timeout) when is_atom(FeatureName) ->
+ is_supported_remotely([FeatureName], Timeout);
+is_supported_remotely([], _) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: skipping query for feature flags support as the "
+ "given list is empty"),
+ true;
+is_supported_remotely(FeatureNames, Timeout) when is_list(FeatureNames) ->
+ case running_remote_nodes() of
+ [] ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: isolated node; skipping remote node query "
+ "=> consider `~p` supported",
+ [FeatureNames]),
+ true;
+ RemoteNodes ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: about to query these remote nodes about "
+ "support for `~p`: ~p",
+ [FeatureNames, RemoteNodes]),
+ is_supported_remotely(RemoteNodes, FeatureNames, Timeout)
+ end.
+
+-spec is_supported_remotely([node()],
+ feature_name() | [feature_name()],
+ timeout()) -> boolean().
+%% @doc
+%% Returns if a single feature flag or a set of feature flags is
+%% supported by specified remote nodes.
+%%
+%% @param RemoteNodes The list of remote nodes to query.
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @param Timeout Time in milliseconds after which the RPC gives up.
+%% @returns `true' if the set of feature flags is entirely supported by
+%% all nodes, or `false' if one of them is not or the RPC timed out.
+
+is_supported_remotely(_, [], _) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: skipping query for feature flags support as the "
+ "given list is empty"),
+ true;
+is_supported_remotely([Node | Rest], FeatureNames, Timeout) ->
+ case does_node_support(Node, FeatureNames, Timeout) of
+ true ->
+ is_supported_remotely(Rest, FeatureNames, Timeout);
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: stopping query for support for `~p` here",
+ [FeatureNames]),
+ false
+ end;
+is_supported_remotely([], FeatureNames, _) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: all running remote nodes support `~p`",
+ [FeatureNames]),
+ true.
+
+-spec is_enabled(feature_name() | [feature_name()]) -> boolean().
+%% @doc
+%% Returns if a single feature flag or a set of feature flags is
+%% enabled.
+%%
+%% This is the same as calling {@link is_enabled/2} as a `blocking'
+%% call.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if the set of feature flags is enabled, or
+%% `false' if one of them is not.
+
+is_enabled(FeatureNames) ->
+ is_enabled(FeatureNames, blocking).
+
+-spec is_enabled
+(feature_name() | [feature_name()], blocking) ->
+ boolean();
+(feature_name() | [feature_name()], non_blocking) ->
+ feature_state().
+%% @doc
+%% Returns if a single feature flag or a set of feature flags is
+%% enabled.
+%%
+%% When `blocking' is passed, the function waits (blocks) until the
+%% state of a feature flag being enabled or disabled stabilizes, then
+%% returns its final state.
+%%
+%% When `non_blocking' is passed, the function returns immediately with
+%% the state of the feature flag (`true' if enabled, `false' otherwise)
+%% or `state_changing' if the state is being changed at the time of the
+%% call.
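+%%
+%% For example, a non-blocking check could be handled like this (the
+%% feature flag name and helper functions are illustrative):
+%%
+%% ```
+%% case rabbit_feature_flags:is_enabled(my_feature_flag, non_blocking) of
+%%     true           -> new_code_path();
+%%     false          -> old_code_path();
+%%     state_changing -> old_code_path()
+%% end
+%% '''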
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if the set of feature flags is enabled,
+%% `false' if one of them is not, or `state_changing' if one of them
+%% is being worked on. Note that `state_changing' has precedence over
+%% `false', so if one is `false' and another one is `state_changing',
+%% `state_changing' is returned.
+
+is_enabled(FeatureNames, non_blocking) ->
+ is_enabled_nb(FeatureNames);
+is_enabled(FeatureNames, blocking) ->
+ case is_enabled_nb(FeatureNames) of
+ state_changing ->
+ global:set_lock(?FF_STATE_CHANGE_LOCK),
+ global:del_lock(?FF_STATE_CHANGE_LOCK),
+ is_enabled(FeatureNames, blocking);
+ IsEnabled ->
+ IsEnabled
+ end.
+
+is_enabled_nb(FeatureName) when is_atom(FeatureName) ->
+ rabbit_ff_registry:is_enabled(FeatureName);
+is_enabled_nb(FeatureNames) when is_list(FeatureNames) ->
+ lists:foldl(
+ fun
+ (_F, state_changing = Acc) ->
+ Acc;
+ (F, false = Acc) ->
+ case rabbit_ff_registry:is_enabled(F) of
+ state_changing -> state_changing;
+ _ -> Acc
+ end;
+ (F, _) ->
+ rabbit_ff_registry:is_enabled(F)
+ end,
+ true, FeatureNames).
+
+-spec is_disabled(feature_name() | [feature_name()]) -> boolean().
+%% @doc
+%% Returns if a single feature flag or one feature flag in a set of
+%% feature flags is disabled.
+%%
+%% This is the same as negating the result of {@link is_enabled/1}.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if one of the feature flags is disabled, or
+%% `false' if they are all enabled.
+
+is_disabled(FeatureNames) ->
+ is_disabled(FeatureNames, blocking).
+
+-spec is_disabled
+(feature_name() | [feature_name()], blocking) ->
+ boolean();
+(feature_name() | [feature_name()], non_blocking) ->
+ feature_state().
+%% @doc
+%% Returns if a single feature flag or one feature flag in a set of
+%% feature flags is disabled.
+%%
+%% This is the same as negating the result of {@link is_enabled/2},
+%% except that `state_changing' is returned as is.
+%%
+%% See {@link is_enabled/2} for a description of the `blocking' and
+%% `non_blocking' modes.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if one feature flag in the set of feature flags is
+%% disabled, `false' if they are all enabled, or `state_changing' if
+%% one of them is being worked on. Note that `state_changing' has
+%% precedence over `true', so if one is `true' (i.e. disabled) and
+%% another one is `state_changing', `state_changing' is returned.
+%%
+%% @see is_enabled/2
+
+is_disabled(FeatureName, Blocking) ->
+ case is_enabled(FeatureName, Blocking) of
+ state_changing -> state_changing;
+ IsEnabled -> not IsEnabled
+ end.
+
+-spec info() -> ok.
+%% @doc
+%% Displays a table on stdout summarizing the supported feature flags,
+%% their state and various information about them.
+
+info() ->
+ info(#{}).
+
+-spec info(#{color => boolean(),
+ lines => boolean(),
+ verbose => non_neg_integer()}) -> ok.
+%% @doc
+%% Displays a table on stdout summarizing the supported feature flags,
+%% their state and various information about them.
+%%
+%% Supported options are:
+%% <ul>
+%% <li>`color': a boolean to indicate if colors should be used to
+%% highlight some elements.</li>
+%% <li>`lines': a boolean to indicate if table borders should be drawn
+%% using ASCII lines instead of regular characters.</li>
+%% <li>`verbose': a non-negative integer to specify the level of
+%% verbosity.</li>
+%% </ul>
+%%
+%% @param Options A map of various options to tune the displayed table.
+
+info(Options) when is_map(Options) ->
+ rabbit_ff_extra:info(Options).
+
+-spec get_state(feature_name()) -> enabled | disabled | unavailable.
+%% @doc
+%% Returns the state of a feature flag.
+%%
+%% The possible states are:
+%% <ul>
+%% <li>`enabled': the feature flag is enabled.</li>
+%% <li>`disabled': the feature flag is supported by all nodes in the
+%% cluster but currently disabled.</li>
+%% <li>`unavailable': the feature flag is unsupported by at least one
+%% node in the cluster and can not be enabled for now.</li>
+%% </ul>
+%%
+%% @param FeatureName The name of the feature flag to check.
+%% @returns `enabled', `disabled' or `unavailable'.
+
+get_state(FeatureName) when is_atom(FeatureName) ->
+ IsEnabled = is_enabled(FeatureName),
+ IsSupported = is_supported(FeatureName),
+ case IsEnabled of
+ true -> enabled;
+ false -> case IsSupported of
+ true -> disabled;
+ false -> unavailable
+ end
+ end.
+
+-spec get_stability(feature_name() | feature_props_extended()) -> stability().
+%% @doc
+%% Returns the stability of a feature flag.
+%%
+%% The possible stability levels are:
+%% <ul>
+%% <li>`stable': the feature flag is stable and will not change in future
+%% releases: it can be enabled in production.</li>
+%% <li>`experimental': the feature flag is experimental and may change in
+%% the future (without a guaranteed upgrade path): enabling it in
+%% production is not recommended.</li>
+%% </ul>
+%%
+%% @param FeatureName The name of the feature flag to check.
+%% @returns `stable' or `experimental'; `undefined' if the feature flag is unknown.
+
+get_stability(FeatureName) when is_atom(FeatureName) ->
+ case rabbit_ff_registry:get(FeatureName) of
+ undefined -> undefined;
+ FeatureProps -> get_stability(FeatureProps)
+ end;
+get_stability(FeatureProps) when is_map(FeatureProps) ->
+ maps:get(stability, FeatureProps, stable).
+
+%% -------------------------------------------------------------------
+%% Feature flags registry.
+%% -------------------------------------------------------------------
+
+-spec init() -> ok | no_return().
+%% @private
+
+init() ->
+ %% We want to make sure the `feature_flags` file exists once
+ %% RabbitMQ was started at least once. This is not required by
+    %% RabbitMQ has been started at least once. This is not required by
+ %% external tools.
+ _ = ensure_enabled_feature_flags_list_file_exists(),
+
+ %% We also "list" supported feature flags. We are not interested in
+ %% that list, however, it triggers the first initialization of the
+ %% registry.
+ _ = list(all),
+ ok.
+
+-spec initialize_registry() -> ok | {error, any()} | no_return().
+%% @private
+%% @doc
+%% Initializes or reinitializes the registry.
+%%
+%% The registry is an Erlang module recompiled at runtime to hold the
+%% state of all supported feature flags.
+%%
+%% That Erlang module is called {@link rabbit_ff_registry}. The initial
+%% source code of this module simply calls this function so it is
+%% replaced by a proper registry.
+%%
+%% Once replaced, the registry contains the map of all supported feature
+%% flags and their state. This makes it very efficient to query a
+%% feature flag state or property.
+%%
+%% The registry is local to each RabbitMQ node.
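+%%
+%% As a rough sketch, the regenerated module ends up containing plain
+%% function clauses along these lines (the flag name is illustrative):
+%%
+%% ```
+%% is_supported(my_feature_flag) -> true;
+%% is_supported(_)               -> false.
+%%
+%% is_enabled(my_feature_flag) -> true;
+%% is_enabled(_)               -> false.
+%% '''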
+
+initialize_registry() ->
+ initialize_registry(#{}).
+
+-spec initialize_registry(feature_flags()) ->
+ ok | {error, any()} | no_return().
+%% @private
+%% @doc
+%% Initializes or reinitializes the registry.
+%%
+%% See {@link initialize_registry/0} for a description of the registry.
+%%
+%% This function takes a map of new supported feature flags (so their
+%% name and extended properties) to add to the existing known feature
+%% flags.
+
+initialize_registry(NewSupportedFeatureFlags) ->
+ %% The first step is to get the feature flag states: if this is the
+ %% first time we initialize it, we read the list from disk (the
+ %% `feature_flags` file). Otherwise we query the existing registry
+ %% before it is replaced.
+ RegistryInitialized = rabbit_ff_registry:is_registry_initialized(),
+ FeatureStates = case RegistryInitialized of
+ true ->
+ rabbit_ff_registry:states();
+ false ->
+ EnabledFeatureNames =
+ read_enabled_feature_flags_list(),
+ list_of_enabled_feature_flags_to_feature_states(
+ EnabledFeatureNames)
+ end,
+
+ %% We also record if the feature flags state was correctly written
+ %% to disk. Currently we don't use this information, but in the
+ %% future, we might want to retry the write if it failed so far.
+ %%
+ %% TODO: Retry to write the feature flags state if the first try
+ %% failed.
+ WrittenToDisk = case RegistryInitialized of
+ true ->
+ rabbit_ff_registry:is_registry_written_to_disk();
+ false ->
+ true
+ end,
+ initialize_registry(NewSupportedFeatureFlags,
+ FeatureStates,
+ WrittenToDisk).
+
+-spec list_of_enabled_feature_flags_to_feature_states([feature_name()]) ->
+ feature_states().
+
+list_of_enabled_feature_flags_to_feature_states(FeatureNames) ->
+ maps:from_list([{FeatureName, true} || FeatureName <- FeatureNames]).
+
+-spec initialize_registry(feature_flags(),
+ feature_states(),
+ boolean()) ->
+ ok | {error, any()} | no_return().
+%% @private
+%% @doc
+%% Initializes or reinitializes the registry.
+%%
+%% See {@link initialize_registry/0} for a description of the registry.
+%%
+%% This function takes a map of new supported feature flags (so their
+%% name and extended properties) to add to the existing known feature
+%% flags, a map of the new feature flag states (whether they are
+%% enabled, disabled or `state_changing'), and a flag to indicate if the
+%% feature flag states were recorded to disk.
+%%
+%% The latter is used to block callers asking if a feature flag is
+%% enabled or disabled while its state is changing.
+
+initialize_registry(NewSupportedFeatureFlags,
+ NewFeatureStates,
+ WrittenToDisk) ->
+ Ret = maybe_initialize_registry(NewSupportedFeatureFlags,
+ NewFeatureStates,
+ WrittenToDisk),
+ case Ret of
+ ok -> ok;
+ restart -> initialize_registry(NewSupportedFeatureFlags,
+ NewFeatureStates,
+ WrittenToDisk);
+ Error -> Error
+ end.
+
+-spec maybe_initialize_registry(feature_flags(),
+ feature_states(),
+ boolean()) ->
+ ok | restart | {error, any()} | no_return().
+
+maybe_initialize_registry(NewSupportedFeatureFlags,
+ NewFeatureStates,
+ WrittenToDisk) ->
+ %% We save the version of the current registry before computing
+ %% the new one. This is used when we do the actual reload: if the
+ %% current registry was reloaded in the meantime, we need to restart
+    %% the computation to make sure we don't lose data.
+ RegistryVsn = registry_vsn(),
+
+ %% We take the feature flags already registered.
+ RegistryInitialized = rabbit_ff_registry:is_registry_initialized(),
+ KnownFeatureFlags1 = case RegistryInitialized of
+ true -> rabbit_ff_registry:list(all);
+ false -> #{}
+ end,
+
+ %% Query the list (it's a map to be exact) of known
+ %% supported feature flags. That list comes from the
+    %% `-rabbit_feature_flag().` module attributes exposed by all
+ %% currently loaded Erlang modules.
+ KnownFeatureFlags2 = query_supported_feature_flags(),
+
+ %% We merge the feature flags we already knew about
+ %% (KnownFeatureFlags1), those found in the loaded applications
+ %% (KnownFeatureFlags2) and those specified in arguments
+ %% (NewSupportedFeatureFlags). The latter come from remote nodes
+ %% usually: for example, they can come from plugins loaded on remote
+    %% usually: for example, they can come from plugins loaded on a remote
+    %% node while those plugins are missing locally. In this case, we
+ %% locally which would cause issues.
+ %%
+    %% It means that the list of feature flags only grows. We don't try
+ %% to clean it at some point because we want to remember about the
+ %% feature flags we saw (and their state). It should be fine because
+ %% that list should remain small.
+ KnownFeatureFlags = maps:merge(KnownFeatureFlags1,
+ KnownFeatureFlags2),
+ AllFeatureFlags = maps:merge(KnownFeatureFlags,
+ NewSupportedFeatureFlags),
+
+ %% Next we want to update the feature states, based on the new
+ %% states passed as arguments.
+ FeatureStates0 = case RegistryInitialized of
+ true ->
+ maps:merge(rabbit_ff_registry:states(),
+ NewFeatureStates);
+ false ->
+ NewFeatureStates
+ end,
+ FeatureStates = maps:filter(
+ fun(_, true) -> true;
+ (_, state_changing) -> true;
+ (_, false) -> false
+ end, FeatureStates0),
+
+ Proceed = does_registry_need_refresh(AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk),
+
+ case Proceed of
+ true ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: (re)initialize registry (~p)",
+ [self()]),
+ T0 = erlang:timestamp(),
+ Ret = do_initialize_registry(RegistryVsn,
+ AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk),
+ T1 = erlang:timestamp(),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: time to regen registry: ~p µs",
+ [timer:now_diff(T1, T0)]),
+ Ret;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry already up-to-date, skipping init"),
+ ok
+ end.
+
+-spec does_registry_need_refresh(feature_flags(),
+ feature_states(),
+ boolean()) ->
+ boolean().
+
+does_registry_need_refresh(AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk) ->
+ case rabbit_ff_registry:is_registry_initialized() of
+ true ->
+ %% Before proceeding with the actual
+ %% (re)initialization, let's see if there are any
+ %% changes.
+ CurrentAllFeatureFlags = rabbit_ff_registry:list(all),
+ CurrentFeatureStates = rabbit_ff_registry:states(),
+ CurrentWrittenToDisk =
+ rabbit_ff_registry:is_registry_written_to_disk(),
+
+ if
+ AllFeatureFlags =/= CurrentAllFeatureFlags ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry refresh needed: "
+ "yes, list of feature flags differs"),
+ true;
+ FeatureStates =/= CurrentFeatureStates ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry refresh needed: "
+ "yes, feature flag states differ"),
+ true;
+ WrittenToDisk =/= CurrentWrittenToDisk ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry refresh needed: "
+ "yes, \"written to disk\" state changed"),
+ true;
+ true ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry refresh needed: no"),
+ false
+ end;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry refresh needed: "
+ "yes, first-time initialization"),
+ true
+ end.
+
+-spec do_initialize_registry(registry_vsn(),
+ feature_flags(),
+ feature_states(),
+ boolean()) ->
+ ok | restart | {error, any()} | no_return().
+%% @private
+
+do_initialize_registry(RegistryVsn,
+ AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk) ->
+ %% We log the state of those feature flags.
+ rabbit_log_feature_flags:info(
+ "Feature flags: list of feature flags found:"),
+ lists:foreach(
+ fun(FeatureName) ->
+ rabbit_log_feature_flags:info(
+ "Feature flags: [~s] ~s",
+ [case maps:is_key(FeatureName, FeatureStates) of
+ true ->
+ case maps:get(FeatureName, FeatureStates) of
+ true -> "x";
+ state_changing -> "~"
+ end;
+ false ->
+ " "
+ end,
+ FeatureName])
+ end, lists:sort(maps:keys(AllFeatureFlags))),
+ rabbit_log_feature_flags:info(
+ "Feature flags: feature flag states written to disk: ~s",
+ [case WrittenToDisk of
+ true -> "yes";
+ false -> "no"
+ end]),
+
+ %% We request the registry to be regenerated and reloaded with the
+ %% new state.
+ regen_registry_mod(RegistryVsn,
+ AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk).
+
+-spec query_supported_feature_flags() -> feature_flags().
+%% @private
+
+-ifdef(TEST).
+-define(PT_TESTSUITE_ATTRS, {?MODULE, testsuite_feature_flags_attrs}).
+
+inject_test_feature_flags(AttributesFromTestsuite) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: injecting feature flags from testsuite: ~p",
+ [AttributesFromTestsuite]),
+ ok = persistent_term:put(?PT_TESTSUITE_ATTRS, AttributesFromTestsuite),
+ initialize_registry().
+
+module_attributes_from_testsuite() ->
+ persistent_term:get(?PT_TESTSUITE_ATTRS, []).
+
+query_supported_feature_flags() ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: query feature flags in loaded applications "
+ "+ testsuite"),
+ T0 = erlang:timestamp(),
+ AttributesPerApp = rabbit_misc:rabbitmq_related_module_attributes(
+ rabbit_feature_flag),
+ AttributesFromTestsuite = module_attributes_from_testsuite(),
+ T1 = erlang:timestamp(),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: time to find supported feature flags: ~p µs",
+ [timer:now_diff(T1, T0)]),
+ AllAttributes = AttributesPerApp ++ AttributesFromTestsuite,
+ prepare_queried_feature_flags(AllAttributes, #{}).
+-else.
+query_supported_feature_flags() ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: query feature flags in loaded applications"),
+ T0 = erlang:timestamp(),
+ AttributesPerApp = rabbit_misc:rabbitmq_related_module_attributes(
+ rabbit_feature_flag),
+ T1 = erlang:timestamp(),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: time to find supported feature flags: ~p µs",
+ [timer:now_diff(T1, T0)]),
+ prepare_queried_feature_flags(AttributesPerApp, #{}).
+-endif.
+
+prepare_queried_feature_flags([{App, _Module, Attributes} | Rest],
+ AllFeatureFlags) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: application `~s` has ~b feature flags",
+ [App, length(Attributes)]),
+ AllFeatureFlags1 = lists:foldl(
+ fun({FeatureName, FeatureProps}, AllFF) ->
+ merge_new_feature_flags(AllFF,
+ App,
+ FeatureName,
+ FeatureProps)
+ end, AllFeatureFlags, Attributes),
+ prepare_queried_feature_flags(Rest, AllFeatureFlags1);
+prepare_queried_feature_flags([], AllFeatureFlags) ->
+ AllFeatureFlags.
+
+-spec merge_new_feature_flags(feature_flags(),
+ atom(),
+ feature_name(),
+ feature_props()) -> feature_flags().
+%% @private
+
+merge_new_feature_flags(AllFeatureFlags, App, FeatureName, FeatureProps)
+ when is_atom(FeatureName) andalso is_map(FeatureProps) ->
+ %% We expand the feature flag properties map with:
+ %% - the name of the application providing it: only informational
+ %% for now, but can be handy to understand that a feature flag
+ %% comes from a plugin.
+ FeatureProps1 = maps:put(provided_by, App, FeatureProps),
+ maps:merge(AllFeatureFlags,
+ #{FeatureName => FeatureProps1}).
+
+-spec regen_registry_mod(registry_vsn(),
+ feature_flags(),
+ feature_states(),
+ boolean()) ->
+ ok | restart | {error, any()} | no_return().
+%% @private
+
+regen_registry_mod(RegistryVsn,
+ AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk) ->
+ %% Here, we recreate the source code of the `rabbit_ff_registry`
+ %% module from scratch.
+ %%
+ %% IMPORTANT: We want both modules to have the exact same public
+ %% API in order to simplify the life of developers and their tools
+ %% (Dialyzer, completion, and so on).
+
+ %% -module(rabbit_ff_registry).
+ ModuleAttr = erl_syntax:attribute(
+ erl_syntax:atom(module),
+ [erl_syntax:atom(rabbit_ff_registry)]),
+ ModuleForm = erl_syntax:revert(ModuleAttr),
+ %% -export([...]).
+ ExportAttr = erl_syntax:attribute(
+ erl_syntax:atom(export),
+ [erl_syntax:list(
+ [erl_syntax:arity_qualifier(
+ erl_syntax:atom(F),
+ erl_syntax:integer(A))
+ || {F, A} <- [{get, 1},
+ {list, 1},
+ {states, 0},
+ {is_supported, 1},
+ {is_enabled, 1},
+ {is_registry_initialized, 0},
+ {is_registry_written_to_disk, 0}]]
+ )
+ ]
+ ),
+ ExportForm = erl_syntax:revert(ExportAttr),
+ %% get(_) -> ...
+ GetClauses = [erl_syntax:clause(
+ [erl_syntax:atom(FeatureName)],
+ [],
+ [erl_syntax:abstract(maps:get(FeatureName,
+ AllFeatureFlags))])
+ || FeatureName <- maps:keys(AllFeatureFlags)
+ ],
+ GetUnknownClause = erl_syntax:clause(
+ [erl_syntax:variable("_")],
+ [],
+ [erl_syntax:atom(undefined)]),
+ GetFun = erl_syntax:function(
+ erl_syntax:atom(get),
+ GetClauses ++ [GetUnknownClause]),
+ GetFunForm = erl_syntax:revert(GetFun),
+ %% list(_) -> ...
+ ListAllBody = erl_syntax:abstract(AllFeatureFlags),
+ ListAllClause = erl_syntax:clause([erl_syntax:atom(all)],
+ [],
+ [ListAllBody]),
+ EnabledFeatureFlags = maps:filter(
+ fun(FeatureName, _) ->
+ maps:is_key(FeatureName,
+ FeatureStates)
+ andalso
+ maps:get(FeatureName, FeatureStates)
+ =:=
+ true
+ end, AllFeatureFlags),
+ ListEnabledBody = erl_syntax:abstract(EnabledFeatureFlags),
+ ListEnabledClause = erl_syntax:clause(
+ [erl_syntax:atom(enabled)],
+ [],
+ [ListEnabledBody]),
+ DisabledFeatureFlags = maps:filter(
+ fun(FeatureName, _) ->
+ not maps:is_key(FeatureName,
+ FeatureStates)
+ end, AllFeatureFlags),
+ ListDisabledBody = erl_syntax:abstract(DisabledFeatureFlags),
+ ListDisabledClause = erl_syntax:clause(
+ [erl_syntax:atom(disabled)],
+ [],
+ [ListDisabledBody]),
+ StateChangingFeatureFlags = maps:filter(
+ fun(FeatureName, _) ->
+ maps:is_key(FeatureName,
+ FeatureStates)
+ andalso
+ maps:get(FeatureName, FeatureStates)
+ =:=
+ state_changing
+ end, AllFeatureFlags),
+ ListStateChangingBody = erl_syntax:abstract(StateChangingFeatureFlags),
+ ListStateChangingClause = erl_syntax:clause(
+ [erl_syntax:atom(state_changing)],
+ [],
+ [ListStateChangingBody]),
+ ListFun = erl_syntax:function(
+ erl_syntax:atom(list),
+ [ListAllClause,
+ ListEnabledClause,
+ ListDisabledClause,
+ ListStateChangingClause]),
+ ListFunForm = erl_syntax:revert(ListFun),
+ %% states() -> ...
+ StatesBody = erl_syntax:abstract(FeatureStates),
+ StatesClause = erl_syntax:clause([], [], [StatesBody]),
+ StatesFun = erl_syntax:function(
+ erl_syntax:atom(states),
+ [StatesClause]),
+ StatesFunForm = erl_syntax:revert(StatesFun),
+ %% is_supported(_) -> ...
+ IsSupportedClauses = [erl_syntax:clause(
+ [erl_syntax:atom(FeatureName)],
+ [],
+ [erl_syntax:atom(true)])
+ || FeatureName <- maps:keys(AllFeatureFlags)
+ ],
+ NotSupportedClause = erl_syntax:clause(
+ [erl_syntax:variable("_")],
+ [],
+ [erl_syntax:atom(false)]),
+ IsSupportedFun = erl_syntax:function(
+ erl_syntax:atom(is_supported),
+ IsSupportedClauses ++ [NotSupportedClause]),
+ IsSupportedFunForm = erl_syntax:revert(IsSupportedFun),
+ %% is_enabled(_) -> ...
+ IsEnabledClauses = [erl_syntax:clause(
+ [erl_syntax:atom(FeatureName)],
+ [],
+ [case maps:is_key(FeatureName, FeatureStates) of
+ true ->
+ erl_syntax:atom(
+ maps:get(FeatureName, FeatureStates));
+ false ->
+ erl_syntax:atom(false)
+ end])
+ || FeatureName <- maps:keys(AllFeatureFlags)
+ ],
+ NotEnabledClause = erl_syntax:clause(
+ [erl_syntax:variable("_")],
+ [],
+ [erl_syntax:atom(false)]),
+ IsEnabledFun = erl_syntax:function(
+ erl_syntax:atom(is_enabled),
+ IsEnabledClauses ++ [NotEnabledClause]),
+ IsEnabledFunForm = erl_syntax:revert(IsEnabledFun),
+ %% is_registry_initialized() -> ...
+ IsInitializedClauses = [erl_syntax:clause(
+ [],
+ [],
+ [erl_syntax:atom(true)])
+ ],
+ IsInitializedFun = erl_syntax:function(
+ erl_syntax:atom(is_registry_initialized),
+ IsInitializedClauses),
+ IsInitializedFunForm = erl_syntax:revert(IsInitializedFun),
+ %% is_registry_written_to_disk() -> ...
+ IsWrittenToDiskClauses = [erl_syntax:clause(
+ [],
+ [],
+ [erl_syntax:atom(WrittenToDisk)])
+ ],
+ IsWrittenToDiskFun = erl_syntax:function(
+ erl_syntax:atom(is_registry_written_to_disk),
+ IsWrittenToDiskClauses),
+ IsWrittenToDiskFunForm = erl_syntax:revert(IsWrittenToDiskFun),
+ %% Compilation!
+ Forms = [ModuleForm,
+ ExportForm,
+ GetFunForm,
+ ListFunForm,
+ StatesFunForm,
+ IsSupportedFunForm,
+ IsEnabledFunForm,
+ IsInitializedFunForm,
+ IsWrittenToDiskFunForm],
+ maybe_log_registry_source_code(Forms),
+ CompileOpts = [return_errors,
+ return_warnings],
+ case compile:forms(Forms, CompileOpts) of
+ {ok, Mod, Bin, _} ->
+ load_registry_mod(RegistryVsn, Mod, Bin);
+ {error, Errors, Warnings} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: registry compilation:~n"
+ "Errors: ~p~n"
+ "Warnings: ~p",
+ [Errors, Warnings]),
+ {error, {compilation_failure, Errors, Warnings}}
+ end.
+
+maybe_log_registry_source_code(Forms) ->
+ case rabbit_prelaunch:get_context() of
+ #{log_feature_flags_registry := true} ->
+ rabbit_log_feature_flags:debug(
+ "== FEATURE FLAGS REGISTRY ==~n"
+ "~s~n"
+ "== END ==~n",
+ [erl_prettypr:format(erl_syntax:form_list(Forms))]);
+ _ ->
+ ok
+ end.
+
+-ifdef(TEST).
+registry_loading_lock() -> ?FF_REGISTRY_LOADING_LOCK.
+-endif.
+
+-spec load_registry_mod(registry_vsn(), atom(), binary()) ->
+ ok | restart | no_return().
+%% @private
+
+load_registry_mod(RegistryVsn, Mod, Bin) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry module ready, loading it (~p)...",
+ [self()]),
+ FakeFilename = "Compiled and loaded by " ?MODULE_STRING,
+ %% Time to load the new registry, replacing the old one. We use a
+ %% lock here to synchronize concurrent reloads.
+ global:set_lock(?FF_REGISTRY_LOADING_LOCK, [node()]),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: acquired lock before reloading registry module (~p)",
+ [self()]),
+ %% We want to make sure that the old registry (not the one being
+ %% currently in use) is purged by the code server. It means no
+ %% process lingers on that old code.
+ %%
+ %% We use code:soft_purge() for that (meaning no process is killed)
+ %% and we wait in an infinite loop for that to succeed.
+ ok = purge_old_registry(Mod),
+ %% Now we can replace the currently loaded registry by the new one.
+ %% The code server takes care of marking the current registry as old
+    %% and loading the new module in an atomic operation.
+ %%
+ %% Therefore there is no chance of a window where there is no
+ %% registry module available, causing the one on disk to be
+ %% reloaded.
+ Ret = case registry_vsn() of
+ RegistryVsn -> code:load_binary(Mod, FakeFilename, Bin);
+ OtherVsn -> {error, {restart, RegistryVsn, OtherVsn}}
+ end,
+ rabbit_log_feature_flags:debug(
+ "Feature flags: releasing lock after reloading registry module (~p)",
+ [self()]),
+ global:del_lock(?FF_REGISTRY_LOADING_LOCK, [node()]),
+ case Ret of
+ {module, _} ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry module loaded (vsn: ~p -> ~p)",
+ [RegistryVsn, registry_vsn()]),
+ ok;
+ {error, {restart, Expected, Current}} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: another registry module was loaded in the "
+ "meantime (expected old vsn: ~p, current vsn: ~p); "
+ "restarting the regen",
+ [Expected, Current]),
+ restart;
+ {error, Reason} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: failed to load registry module: ~p",
+ [Reason]),
+ throw({feature_flag_registry_reload_failure, Reason})
+ end.
+
+-spec registry_vsn() -> registry_vsn().
+%% @private
+
+registry_vsn() ->
+ Attrs = rabbit_ff_registry:module_info(attributes),
+ proplists:get_value(vsn, Attrs, undefined).
+
+purge_old_registry(Mod) ->
+ case code:is_loaded(Mod) of
+ {file, _} -> do_purge_old_registry(Mod);
+ false -> ok
+ end.
+
+do_purge_old_registry(Mod) ->
+ case code:soft_purge(Mod) of
+ true -> ok;
+ false -> do_purge_old_registry(Mod)
+ end.
+
+%% -------------------------------------------------------------------
+%% Feature flags state storage.
+%% -------------------------------------------------------------------
+
+-spec ensure_enabled_feature_flags_list_file_exists() -> ok | {error, any()}.
+%% @private
+
+ensure_enabled_feature_flags_list_file_exists() ->
+ File = enabled_feature_flags_list_file(),
+ case filelib:is_regular(File) of
+ true -> ok;
+ false -> write_enabled_feature_flags_list([])
+ end.
+
+-spec read_enabled_feature_flags_list() ->
+ [feature_name()] | no_return().
+%% @private
+
+read_enabled_feature_flags_list() ->
+ case try_to_read_enabled_feature_flags_list() of
+ {error, Reason} ->
+ File = enabled_feature_flags_list_file(),
+ throw({feature_flags_file_read_error, File, Reason});
+ Ret ->
+ Ret
+ end.
+
+-spec try_to_read_enabled_feature_flags_list() ->
+ [feature_name()] | {error, any()}.
+%% @private
+
+try_to_read_enabled_feature_flags_list() ->
+ File = enabled_feature_flags_list_file(),
+ case file:consult(File) of
+ {ok, [List]} ->
+ List;
+ {error, enoent} ->
+ %% If the file is missing, we consider the list of enabled
+ %% feature flags to be empty.
+ [];
+ {error, Reason} = Error ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: failed to read the `feature_flags` "
+ "file at `~s`: ~s",
+ [File, file:format_error(Reason)]),
+ Error
+ end.
+
+-spec write_enabled_feature_flags_list([feature_name()]) ->
+ ok | no_return().
+%% @private
+
+write_enabled_feature_flags_list(FeatureNames) ->
+ case try_to_write_enabled_feature_flags_list(FeatureNames) of
+ {error, Reason} ->
+ File = enabled_feature_flags_list_file(),
+ throw({feature_flags_file_write_error, File, Reason});
+ Ret ->
+ Ret
+ end.
+
+-spec try_to_write_enabled_feature_flags_list([feature_name()]) ->
+ ok | {error, any()}.
+%% @private
+
+try_to_write_enabled_feature_flags_list(FeatureNames) ->
+ %% Before writing the new file, we read the existing one. If there
+ %% are unknown feature flags in that file, we want to keep their
+ %% state, even though they are unsupported at this time. It could be
+ %% that a plugin was disabled in the meantime.
+ %%
+ %% FIXME: Lock this code to fix concurrent read/modify/write.
+ PreviouslyEnabled = case try_to_read_enabled_feature_flags_list() of
+ {error, _} -> [];
+ List -> List
+ end,
+ FeatureNames1 = lists:foldl(
+ fun(Name, Acc) ->
+ case is_supported_locally(Name) of
+ true -> Acc;
+ false -> [Name | Acc]
+ end
+ end, FeatureNames, PreviouslyEnabled),
+ FeatureNames2 = lists:sort(FeatureNames1),
+
+ File = enabled_feature_flags_list_file(),
+ Content = io_lib:format("~p.~n", [FeatureNames2]),
+    %% TODO: If we fail to write the file, we should spawn a process
+ %% to retry the operation.
+ case file:write_file(File, Content) of
+ ok ->
+ ok;
+ {error, Reason} = Error ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: failed to write the `feature_flags` "
+ "file at `~s`: ~s",
+ [File, file:format_error(Reason)]),
+ Error
+ end.
+
+-spec enabled_feature_flags_list_file() -> file:filename().
+%% @doc
+%% Returns the path to the file where the state of feature flags is stored.
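+%%
+%% The file contains a single Erlang term: the list of enabled feature
+%% flag names, written with `file:write_file/2' and read back with
+%% `file:consult/1'. Its content looks like this (names are illustrative):
+%% ```
+%% [feature_a,feature_b].
+%% '''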
+%%
+%% @returns the path to the file.
+
+enabled_feature_flags_list_file() ->
+ case application:get_env(rabbit, feature_flags_file) of
+ {ok, Val} -> Val;
+ undefined -> throw(feature_flags_file_not_set)
+ end.
+
+%% -------------------------------------------------------------------
+%% Feature flags management: enabling.
+%% -------------------------------------------------------------------
+
+-spec do_enable(feature_name()) -> ok | {error, any()} | no_return().
+%% @private
+
+do_enable(FeatureName) ->
+ %% We mark this feature flag as "state changing" before doing the
+    %% actual state change. We also take a global lock: this allows us
+    %% to block callers asking about a feature flag while its state changes.
+ global:set_lock(?FF_STATE_CHANGE_LOCK),
+ Ret = case mark_as_enabled(FeatureName, state_changing) of
+ ok ->
+ case enable_dependencies(FeatureName, true) of
+ ok ->
+ case run_migration_fun(FeatureName, enable) of
+ ok ->
+ mark_as_enabled(FeatureName, true);
+ {error, no_migration_fun} ->
+ mark_as_enabled(FeatureName, true);
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end,
+ case Ret of
+ ok -> ok;
+ _ -> mark_as_enabled(FeatureName, false)
+ end,
+ global:del_lock(?FF_STATE_CHANGE_LOCK),
+ Ret.
+
+-spec enable_locally(feature_name()) -> ok | {error, any()} | no_return().
+%% @private
+
+enable_locally(FeatureName) when is_atom(FeatureName) ->
+ case is_enabled(FeatureName) of
+ true ->
+ ok;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: enable locally (as part of feature "
+ "flag states synchronization)",
+ [FeatureName]),
+ do_enable_locally(FeatureName)
+ end.
+
+-spec do_enable_locally(feature_name()) -> ok | {error, any()} | no_return().
+%% @private
+
+do_enable_locally(FeatureName) ->
+ case enable_dependencies(FeatureName, false) of
+ ok ->
+ case run_migration_fun(FeatureName, enable) of
+ ok ->
+ mark_as_enabled_locally(FeatureName, true);
+ {error, no_migration_fun} ->
+ mark_as_enabled_locally(FeatureName, true);
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+-spec enable_dependencies(feature_name(), boolean()) ->
+ ok | {error, any()} | no_return().
+%% @private
+
+enable_dependencies(FeatureName, Everywhere) ->
+ FeatureProps = rabbit_ff_registry:get(FeatureName),
+ DependsOn = maps:get(depends_on, FeatureProps, []),
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: enable dependencies: ~p",
+ [FeatureName, DependsOn]),
+ enable_dependencies(FeatureName, DependsOn, Everywhere).
+
+-spec enable_dependencies(feature_name(), [feature_name()], boolean()) ->
+ ok | {error, any()} | no_return().
+%% @private
+
+enable_dependencies(TopLevelFeatureName, [FeatureName | Rest], Everywhere) ->
+ Ret = case Everywhere of
+ true -> enable(FeatureName);
+ false -> enable_locally(FeatureName)
+ end,
+ case Ret of
+ ok -> enable_dependencies(TopLevelFeatureName, Rest, Everywhere);
+ Error -> Error
+ end;
+enable_dependencies(_, [], _) ->
+ ok.
+
+-spec run_migration_fun(feature_name(), any()) ->
+ any() | {error, any()}.
+%% @private
+
+run_migration_fun(FeatureName, Arg) ->
+ FeatureProps = rabbit_ff_registry:get(FeatureName),
+ run_migration_fun(FeatureName, FeatureProps, Arg).
+
+run_migration_fun(FeatureName, FeatureProps, Arg) ->
+ case maps:get(migration_fun, FeatureProps, none) of
+ {MigrationMod, MigrationFun}
+ when is_atom(MigrationMod) andalso is_atom(MigrationFun) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: run migration function ~p with arg: ~p",
+ [FeatureName, MigrationFun, Arg]),
+ try
+ erlang:apply(MigrationMod,
+ MigrationFun,
+ [FeatureName, FeatureProps, Arg])
+ catch
+ _:Reason:Stacktrace ->
+ rabbit_log_feature_flags:error(
+ "Feature flag `~s`: migration function crashed: ~p~n~p",
+ [FeatureName, Reason, Stacktrace]),
+ {error, {migration_fun_crash, Reason, Stacktrace}}
+ end;
+ none ->
+ {error, no_migration_fun};
+ Invalid ->
+ rabbit_log_feature_flags:error(
+ "Feature flag `~s`: invalid migration function: ~p",
+ [FeatureName, Invalid]),
+ {error, {invalid_migration_fun, Invalid}}
+ end.
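+
+%% A migration function is referenced from the feature flag properties as a
+%% `{Module, Function}' tuple and invoked as `Module:Function(FeatureName,
+%% FeatureProps, Arg)', where `Arg' is e.g. `enable' or `is_enabled'. A
+%% minimal sketch (the module, flag and return values below are
+%% hypothetical):
+%%
+%%   %% In the feature flag definition:
+%%   %%   #{migration_fun => {my_app_ff, migrate}}
+%%
+%%   migrate(_FeatureName, _FeatureProps, enable)     -> ok;
+%%   migrate(_FeatureName, _FeatureProps, is_enabled) -> true.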
+
+-spec mark_as_enabled(feature_name(), feature_state()) ->
+ any() | {error, any()} | no_return().
+%% @private
+
+mark_as_enabled(FeatureName, IsEnabled) ->
+ case mark_as_enabled_locally(FeatureName, IsEnabled) of
+ ok ->
+ mark_as_enabled_remotely(FeatureName, IsEnabled);
+ Error ->
+ Error
+ end.
+
+-spec mark_as_enabled_locally(feature_name(), feature_state()) ->
+ any() | {error, any()} | no_return().
+%% @private
+
+mark_as_enabled_locally(FeatureName, IsEnabled) ->
+ rabbit_log_feature_flags:info(
+ "Feature flag `~s`: mark as enabled=~p",
+ [FeatureName, IsEnabled]),
+ EnabledFeatureNames = maps:keys(list(enabled)),
+ NewEnabledFeatureNames = case IsEnabled of
+ true ->
+ [FeatureName | EnabledFeatureNames];
+ false ->
+ EnabledFeatureNames -- [FeatureName];
+ state_changing ->
+ EnabledFeatureNames
+ end,
+ WrittenToDisk = case NewEnabledFeatureNames of
+ EnabledFeatureNames ->
+ rabbit_ff_registry:is_registry_written_to_disk();
+ _ ->
+ ok =:= try_to_write_enabled_feature_flags_list(
+ NewEnabledFeatureNames)
+ end,
+ initialize_registry(#{},
+ #{FeatureName => IsEnabled},
+ WrittenToDisk).
+
+-spec mark_as_enabled_remotely(feature_name(), feature_state()) ->
+ any() | {error, any()} | no_return().
+%% @private
+
+mark_as_enabled_remotely(FeatureName, IsEnabled) ->
+ Nodes = running_remote_nodes(),
+ mark_as_enabled_remotely(Nodes, FeatureName, IsEnabled, ?TIMEOUT).
+
+-spec mark_as_enabled_remotely([node()],
+ feature_name(),
+ feature_state(),
+ timeout()) ->
+ any() | {error, any()} | no_return().
+%% @private
+
+mark_as_enabled_remotely([], _FeatureName, _IsEnabled, _Timeout) ->
+ ok;
+mark_as_enabled_remotely(Nodes, FeatureName, IsEnabled, Timeout) ->
+ T0 = erlang:timestamp(),
+ Rets = [{Node, rpc:call(Node,
+ ?MODULE,
+ mark_as_enabled_locally,
+ [FeatureName, IsEnabled],
+ Timeout)}
+ || Node <- Nodes],
+ FailedNodes = [Node || {Node, Ret} <- Rets, Ret =/= ok],
+ case FailedNodes of
+ [] ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: `~s` successfully marked as enabled=~p on all "
+ "nodes", [FeatureName, IsEnabled]),
+ ok;
+ _ ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: failed to mark feature flag `~s` as enabled=~p "
+ "on the following nodes:", [FeatureName, IsEnabled]),
+ [rabbit_log_feature_flags:error(
+ "Feature flags: - ~s: ~p",
+ [Node, Ret])
+ || {Node, Ret} <- Rets,
+ Ret =/= ok],
+ Sleep = 1000,
+ T1 = erlang:timestamp(),
+ Duration = timer:now_diff(T1, T0),
+ NewTimeout = (Timeout * 1000 - Duration) div 1000 - Sleep,
+ if
+ NewTimeout > 0 ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: retrying with a timeout of ~b "
+ "ms after sleeping for ~b ms",
+ [NewTimeout, Sleep]),
+ timer:sleep(Sleep),
+ mark_as_enabled_remotely(FailedNodes,
+ FeatureName,
+ IsEnabled,
+ NewTimeout);
+ true ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: not retrying; RPC went over the "
+ "~b milliseconds timeout", [Timeout]),
+ %% FIXME: Is crashing the process the best solution here?
+ throw(
+ {failed_to_mark_feature_flag_as_enabled_on_remote_nodes,
+ FeatureName, IsEnabled, FailedNodes})
+ end
+ end.
+
+%% -------------------------------------------------------------------
+%% Coordination with remote nodes.
+%% -------------------------------------------------------------------
+
+-spec remote_nodes() -> [node()].
+%% @private
+
+remote_nodes() ->
+ mnesia:system_info(db_nodes) -- [node()].
+
+-spec running_remote_nodes() -> [node()].
+%% @private
+
+running_remote_nodes() ->
+ mnesia:system_info(running_db_nodes) -- [node()].
+
+query_running_remote_nodes(Node, Timeout) ->
+ case rpc:call(Node, mnesia, system_info, [running_db_nodes], Timeout) of
+ {badrpc, _} = Error -> Error;
+ Nodes -> Nodes -- [node()]
+ end.
+
+-spec does_node_support(node(), [feature_name()], timeout()) -> boolean().
+%% @private
+
+does_node_support(Node, FeatureNames, Timeout) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: querying `~p` support on node ~s...",
+ [FeatureNames, Node]),
+ Ret = case node() of
+ Node ->
+ is_supported_locally(FeatureNames);
+ _ ->
+ run_feature_flags_mod_on_remote_node(
+ Node, is_supported_locally, [FeatureNames], Timeout)
+ end,
+ case Ret of
+ {error, pre_feature_flags_rabbitmq} ->
+ %% See run_feature_flags_mod_on_remote_node/4 for
+ %% an explanation why we consider this node a 3.7.x
+ %% pre-feature-flags node.
+ rabbit_log_feature_flags:debug(
+ "Feature flags: no feature flags support on node `~s`, "
+ "consider the feature flags unsupported: ~p",
+ [Node, FeatureNames]),
+ false;
+ {error, Reason} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: error while querying `~p` support on "
+ "node ~s: ~p",
+ [FeatureNames, Node, Reason]),
+ false;
+ true ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: node `~s` supports `~p`",
+ [Node, FeatureNames]),
+ true;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: node `~s` does not support `~p`; "
+ "stopping query here",
+ [Node, FeatureNames]),
+ false
+ end.
+
+-spec check_node_compatibility(node()) -> ok | {error, any()}.
+%% @doc
+%% Checks if a node is compatible with the local node.
+%%
+%% To be compatible, the following two conditions must be met:
+%% <ol>
+%% <li>feature flags enabled on the local node must be supported by the
+%% remote node</li>
+%% <li>feature flags enabled on the remote node must be supported by the
+%% local node</li>
+%% </ol>
+%%
+%% @param Node the name of the remote node to test.
+%% @returns `ok' if they are compatible, `{error, Reason}' if they are not.
+
+check_node_compatibility(Node) ->
+ check_node_compatibility(Node, ?TIMEOUT).
+
+-spec check_node_compatibility(node(), timeout()) -> ok | {error, any()}.
+%% @doc
+%% Checks if a node is compatible with the local node.
+%%
+%% See {@link check_node_compatibility/1} for the conditions required to
+%% consider two nodes compatible.
+%%
+%% @param Node the name of the remote node to test.
+%% @param Timeout Time in milliseconds after which the RPC gives up.
+%% @returns `ok' if they are compatible, `{error, Reason}' if they are not.
+%%
+%% @see check_node_compatibility/1
+
+check_node_compatibility(Node, Timeout) ->
+ %% Before checking compatibility, we exchange feature flags from
+ %% unknown Erlang applications. So we fetch remote feature flags
+ %% from applications which are not loaded locally, and the opposite.
+ %%
+ %% The goal is to prevent such feature flags from blocking the
+ %% communication between nodes just because the code handling them
+ %% is missing on one of those nodes. Therefore they should not be
+ %% considered when determining compatibility.
+ exchange_feature_flags_from_unknown_apps(Node, Timeout),
+
+ %% FIXME:
+ %% When we try to cluster two nodes, we get:
+ %% Feature flags: starting an unclustered node: all feature flags
+ %% will be enabled by default
+ %% It should probably not be the case...
+
+ %% We can now proceed with the actual compatibility check.
+ rabbit_log_feature_flags:debug(
+ "Feature flags: node `~s` compatibility check, part 1/2",
+ [Node]),
+ Part1 = local_enabled_feature_flags_is_supported_remotely(Node, Timeout),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: node `~s` compatibility check, part 2/2",
+ [Node]),
+ Part2 = remote_enabled_feature_flags_is_supported_locally(Node, Timeout),
+ case {Part1, Part2} of
+ {true, true} ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: node `~s` is compatible",
+ [Node]),
+ ok;
+ {false, _} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: node `~s` is INCOMPATIBLE: "
+ "feature flags enabled locally are not supported remotely",
+ [Node]),
+ {error, incompatible_feature_flags};
+ {_, false} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: node `~s` is INCOMPATIBLE: "
+ "feature flags enabled remotely are not supported locally",
+ [Node]),
+ {error, incompatible_feature_flags}
+ end.
+
+-spec is_node_compatible(node()) -> boolean().
+%% @doc
+%% Returns if a node is compatible with the local node.
+%%
+%% This function calls {@link check_node_compatibility/2} and returns
+%% `true' if the latter returns `ok'. It is therefore the same check,
+%% except that this function returns a boolean and does not report the
+%% reason for the incompatibility, if any.
+%%
+%% @param Node the name of the remote node to test.
+%% @returns `true' if they are compatible, `false' otherwise.
+
+is_node_compatible(Node) ->
+ is_node_compatible(Node, ?TIMEOUT).
+
+-spec is_node_compatible(node(), timeout()) -> boolean().
+%% @doc
+%% Returns if a node is compatible with the local node.
+%%
+%% This function calls {@link check_node_compatibility/2} and returns
+%% `true' if the latter returns `ok'. It is therefore the same check,
+%% except that this function returns a boolean and does not report the
+%% reason for the incompatibility, if any. If the RPC times out, nodes
+%% are considered incompatible.
+%%
+%% @param Node the name of the remote node to test.
+%% @param Timeout Time in milliseconds after which the RPC gives up.
+%% @returns `true' if they are compatible, `false' otherwise.
+
+is_node_compatible(Node, Timeout) ->
+ check_node_compatibility(Node, Timeout) =:= ok.
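+
+%% Example from an Erlang shell (the remote node name is hypothetical):
+%%
+%%   1> rabbit_feature_flags:is_node_compatible('rabbit@other-host').
+%%   true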
+
+-spec local_enabled_feature_flags_is_supported_remotely(node(),
+ timeout()) ->
+ boolean().
+%% @private
+
+local_enabled_feature_flags_is_supported_remotely(Node, Timeout) ->
+ LocalEnabledFeatureNames = maps:keys(list(enabled)),
+ is_supported_remotely([Node], LocalEnabledFeatureNames, Timeout).
+
+-spec remote_enabled_feature_flags_is_supported_locally(node(),
+ timeout()) ->
+ boolean().
+%% @private
+
+remote_enabled_feature_flags_is_supported_locally(Node, Timeout) ->
+ case query_remote_feature_flags(Node, enabled, Timeout) of
+ {error, _} ->
+ false;
+ RemoteEnabledFeatureFlags when is_map(RemoteEnabledFeatureFlags) ->
+ RemoteEnabledFeatureNames = maps:keys(RemoteEnabledFeatureFlags),
+ is_supported_locally(RemoteEnabledFeatureNames)
+ end.
+
+-spec run_feature_flags_mod_on_remote_node(node(),
+ atom(),
+ [term()],
+ timeout()) ->
+ term() | {error, term()}.
+%% @private
+
+run_feature_flags_mod_on_remote_node(Node, Function, Args, Timeout) ->
+ case rpc:call(Node, ?MODULE, Function, Args, Timeout) of
+ {badrpc, {'EXIT',
+ {undef,
+ [{?MODULE, Function, Args, []}
+ | _]}}} ->
+ %% If rabbit_feature_flags:Function() is undefined
+ %% on the remote node, we consider it to be a 3.7.x
+ %% pre-feature-flags node.
+ %%
+ %% Theoretically, it could be an older version (3.6.x and
+ %% older). But the RabbitMQ version consistency check
+ %% (rabbit_misc:version_minor_equivalent/2) called from
+ %% rabbit_mnesia:check_rabbit_consistency/2 already blocked
+ %% this situation from happening before we reach this point.
+ rabbit_log_feature_flags:debug(
+ "Feature flags: ~s:~s~p unavailable on node `~s`: "
+ "assuming it is a RabbitMQ 3.7.x pre-feature-flags node",
+ [?MODULE, Function, Args, Node]),
+ {error, pre_feature_flags_rabbitmq};
+ {badrpc, Reason} = Error ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: error while running ~s:~s~p "
+ "on node `~s`: ~p",
+ [?MODULE, Function, Args, Node, Reason]),
+ {error, Error};
+ Ret ->
+ Ret
+ end.
+
+-spec query_remote_feature_flags(node(),
+ Which :: all | enabled | disabled,
+ timeout()) ->
+ feature_flags() | {error, any()}.
+%% @private
+
+query_remote_feature_flags(Node, Which, Timeout) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: querying ~s feature flags on node `~s`...",
+ [Which, Node]),
+ case run_feature_flags_mod_on_remote_node(Node, list, [Which], Timeout) of
+ {error, pre_feature_flags_rabbitmq} ->
+ %% See run_feature_flags_mod_on_remote_node/4 for
+ %% an explanation why we consider this node a 3.7.x
+ %% pre-feature-flags node.
+ rabbit_log_feature_flags:debug(
+ "Feature flags: no feature flags support on node `~s`, "
+ "consider the list of feature flags empty", [Node]),
+ #{};
+ {error, Reason} = Error ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: error while querying ~s feature flags "
+ "on node `~s`: ~p",
+ [Which, Node, Reason]),
+ Error;
+ RemoteFeatureFlags when is_map(RemoteFeatureFlags) ->
+ RemoteFeatureNames = maps:keys(RemoteFeatureFlags),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: querying ~s feature flags on node `~s` "
+ "done; ~s features: ~p",
+ [Which, Node, Which, RemoteFeatureNames]),
+ RemoteFeatureFlags
+ end.
+
+-spec merge_feature_flags_from_unknown_apps(feature_flags()) ->
+ ok | {error, any()}.
+%% @private
+
+merge_feature_flags_from_unknown_apps(FeatureFlags)
+ when is_map(FeatureFlags) ->
+ LoadedApps = [App || {App, _, _} <- application:loaded_applications()],
+ FeatureFlagsFromUnknownApps =
+ maps:fold(
+ fun(FeatureName, FeatureProps, UnknownFF) ->
+ case is_supported_locally(FeatureName) of
+ true ->
+ UnknownFF;
+ false ->
+ FeatureProvider = maps:get(provided_by, FeatureProps),
+ case lists:member(FeatureProvider, LoadedApps) of
+ true -> UnknownFF;
+ false -> maps:put(FeatureName, FeatureProps,
+ UnknownFF)
+ end
+ end
+ end,
+ #{},
+ FeatureFlags),
+ case maps:keys(FeatureFlagsFromUnknownApps) of
+ [] ->
+ ok;
+ _ ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: register feature flags provided by applications "
+ "unknown locally: ~p",
+ [maps:keys(FeatureFlagsFromUnknownApps)]),
+ initialize_registry(FeatureFlagsFromUnknownApps)
+ end.
+
+exchange_feature_flags_from_unknown_apps(Node, Timeout) ->
+ %% The first step is to fetch feature flags from Erlang applications
+ %% we don't know locally (they are loaded remotely, but not
+ %% locally).
+ fetch_remote_feature_flags_from_apps_unknown_locally(Node, Timeout),
+
+ %% The next step is to do the opposite: push feature flags to remote
+ %% nodes so they can register those from applications they don't
+ %% know.
+ push_local_feature_flags_from_apps_unknown_remotely(Node, Timeout).
+
+fetch_remote_feature_flags_from_apps_unknown_locally(Node, Timeout) ->
+ RemoteFeatureFlags = query_remote_feature_flags(Node, all, Timeout),
+ merge_feature_flags_from_unknown_apps(RemoteFeatureFlags).
+
+push_local_feature_flags_from_apps_unknown_remotely(Node, Timeout) ->
+ LocalFeatureFlags = list(all),
+ push_local_feature_flags_from_apps_unknown_remotely(
+ Node, LocalFeatureFlags, Timeout).
+
+push_local_feature_flags_from_apps_unknown_remotely(
+ Node, FeatureFlags, Timeout)
+ when map_size(FeatureFlags) > 0 ->
+ case query_running_remote_nodes(Node, Timeout) of
+ {badrpc, Reason} ->
+ {error, Reason};
+ Nodes ->
+ lists:foreach(
+ fun(N) ->
+ run_feature_flags_mod_on_remote_node(
+ N,
+ merge_feature_flags_from_unknown_apps,
+ [FeatureFlags],
+ Timeout)
+ end, Nodes)
+ end;
+push_local_feature_flags_from_apps_unknown_remotely(_, _, _) ->
+ ok.
+
+-spec sync_feature_flags_with_cluster([node()], boolean()) ->
+ ok | {error, any()} | no_return().
+%% @private
+
+sync_feature_flags_with_cluster(Nodes, NodeIsVirgin) ->
+ sync_feature_flags_with_cluster(Nodes, NodeIsVirgin, ?TIMEOUT).
+
+-spec sync_feature_flags_with_cluster([node()], boolean(), timeout()) ->
+ ok | {error, any()} | no_return().
+%% @private
+
+sync_feature_flags_with_cluster([], NodeIsVirgin, _) ->
+ verify_which_feature_flags_are_actually_enabled(),
+ case NodeIsVirgin of
+ true ->
+ FeatureNames = get_forced_feature_flag_names(),
+ case remote_nodes() of
+ [] when FeatureNames =:= undefined ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: starting an unclustered node "
+ "for the first time: all feature flags will be "
+ "enabled by default"),
+ enable_all();
+ [] ->
+ case FeatureNames of
+ [] ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: starting an unclustered "
+ "node for the first time: all feature "
+ "flags are forcibly left disabled from "
+ "the $RABBITMQ_FEATURE_FLAGS environment "
+ "variable"),
+ ok;
+ _ ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: starting an unclustered "
+ "node for the first time: only the "
+ "following feature flags specified in "
+ "the $RABBITMQ_FEATURE_FLAGS environment "
+ "variable will be enabled: ~p",
+ [FeatureNames]),
+ enable(FeatureNames)
+ end;
+ _ ->
+ ok
+ end;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: starting an unclustered node which is "
+ "already initialized: all feature flags left in their "
+ "current state"),
+ ok
+ end;
+sync_feature_flags_with_cluster(Nodes, _, Timeout) ->
+ verify_which_feature_flags_are_actually_enabled(),
+ RemoteNodes = Nodes -- [node()],
+ sync_feature_flags_with_cluster1(RemoteNodes, Timeout).
+
+sync_feature_flags_with_cluster1([], _) ->
+ ok;
+sync_feature_flags_with_cluster1(RemoteNodes, Timeout) ->
+ RandomRemoteNode = pick_one_node(RemoteNodes),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: SYNCING FEATURE FLAGS with node `~s`...",
+ [RandomRemoteNode]),
+ case query_remote_feature_flags(RandomRemoteNode, enabled, Timeout) of
+ {error, _} = Error ->
+ Error;
+ RemoteFeatureFlags ->
+ RemoteFeatureNames = maps:keys(RemoteFeatureFlags),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: enabling locally feature flags already "
+ "enabled on node `~s`...",
+ [RandomRemoteNode]),
+ case do_sync_feature_flags_with_node(RemoteFeatureNames) of
+ ok ->
+ sync_feature_flags_with_cluster2(
+ RandomRemoteNode, Timeout);
+ Error ->
+ Error
+ end
+ end.
+
+sync_feature_flags_with_cluster2(RandomRemoteNode, Timeout) ->
+ LocalFeatureNames = maps:keys(list(enabled)),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: enabling on node `~s` feature flags already "
+ "enabled locally...",
+ [RandomRemoteNode]),
+ Ret = run_feature_flags_mod_on_remote_node(
+ RandomRemoteNode,
+ do_sync_feature_flags_with_node,
+ [LocalFeatureNames],
+ Timeout),
+ case Ret of
+ {error, pre_feature_flags_rabbitmq} -> ok;
+ _ -> Ret
+ end.
+
+pick_one_node(Nodes) ->
+ RandomIndex = rand:uniform(length(Nodes)),
+ lists:nth(RandomIndex, Nodes).
+
+do_sync_feature_flags_with_node([FeatureFlag | Rest]) ->
+ case enable_locally(FeatureFlag) of
+ ok -> do_sync_feature_flags_with_node(Rest);
+ Error -> Error
+ end;
+do_sync_feature_flags_with_node([]) ->
+ ok.
+
+-spec get_forced_feature_flag_names() -> [feature_name()] | undefined.
+%% @private
+%% @doc
+%% Returns the (possibly empty) list of feature flags the user wants
+%% to enable out-of-the-box when starting a node for the first time.
+%%
+%% Without this, the default is to enable all the supported feature
+%% flags.
+%%
+%% There are two ways to specify that list:
+%% <ol>
+%% <li>Using the `$RABBITMQ_FEATURE_FLAGS' environment variable; for
+%% instance `RABBITMQ_FEATURE_FLAGS=quorum_queue,mnevis'.</li>
+%% <li>Using the `forced_feature_flags_on_init' configuration parameter;
+%% for instance
+%% `{rabbit, [{forced_feature_flags_on_init, [quorum_queue, mnevis]}]}'.</li>
+%% </ol>
+%%
+%% The environment variable has precedence over the configuration
+%% parameter.
+
+get_forced_feature_flag_names() ->
+ Ret = case get_forced_feature_flag_names_from_env() of
+ undefined -> get_forced_feature_flag_names_from_config();
+ List -> List
+ end,
+ case Ret of
+ undefined -> ok;
+ [] -> rabbit_log_feature_flags:info(
+ "Feature flags: automatic enablement of feature "
+ "flags disabled (i.e. none will be enabled "
+ "automatically)");
+ _ -> rabbit_log_feature_flags:info(
+ "Feature flags: automatic enablement of feature "
+ "flags limited to the following list: ~p", [Ret])
+ end,
+ Ret.
+
+-spec get_forced_feature_flag_names_from_env() -> [feature_name()] | undefined.
+%% @private
+
+get_forced_feature_flag_names_from_env() ->
+ case rabbit_prelaunch:get_context() of
+ #{forced_feature_flags_on_init := ForcedFFs}
+ when is_list(ForcedFFs) ->
+ ForcedFFs;
+ _ ->
+ undefined
+ end.
+
+-spec get_forced_feature_flag_names_from_config() -> [feature_name()] | undefined.
+%% @private
+
+get_forced_feature_flag_names_from_config() ->
+ Value = application:get_env(rabbit,
+ forced_feature_flags_on_init,
+ undefined),
+ case Value of
+ undefined ->
+ Value;
+ _ when is_list(Value) ->
+ case lists:all(fun is_atom/1, Value) of
+ true -> Value;
+ false -> undefined
+ end;
+ _ ->
+ undefined
+ end.
+
+-spec verify_which_feature_flags_are_actually_enabled() ->
+ ok | {error, any()} | no_return().
+%% @private
+
+verify_which_feature_flags_are_actually_enabled() ->
+ AllFeatureFlags = list(all),
+ EnabledFeatureNames = read_enabled_feature_flags_list(),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: double-checking feature flag states..."),
+ %% In case the previous instance of the node failed to write the
+ %% feature flags list file, we want to double-check the list of
+ %% enabled feature flags read from disk. For each feature flag,
+ %% we call the migration function to query if the feature flag is
+ %% actually enabled.
+ %%
+ %% If a feature flag doesn't provide a migration function (or if the
+ %% function fails), we keep the current state of the feature flag.
+ List1 = maps:fold(
+ fun(Name, Props, Acc) ->
+ Ret = run_migration_fun(Name, Props, is_enabled),
+ case Ret of
+ true ->
+ [Name | Acc];
+ false ->
+ Acc;
+ _ ->
+ MarkedAsEnabled = is_enabled(Name),
+ case MarkedAsEnabled of
+ true -> [Name | Acc];
+ false -> Acc
+ end
+ end
+ end,
+ [], AllFeatureFlags),
+ RepairedEnabledFeatureNames = lists:sort(List1),
+ %% We log the list of feature flags for which the state changes
+ %% after the check above.
+ WereEnabled = RepairedEnabledFeatureNames -- EnabledFeatureNames,
+ WereDisabled = EnabledFeatureNames -- RepairedEnabledFeatureNames,
+ case {WereEnabled, WereDisabled} of
+ {[], []} -> ok;
+ _ -> rabbit_log_feature_flags:warning(
+ "Feature flags: the previous instance of this node "
+ "must have failed to write the `feature_flags` "
+ "file at `~s`:",
+ [enabled_feature_flags_list_file()])
+ end,
+ case WereEnabled of
+ [] -> ok;
+ _ -> rabbit_log_feature_flags:warning(
+ "Feature flags: - list of previously enabled "
+ "feature flags now marked as such: ~p", [WereEnabled])
+ end,
+ case WereDisabled of
+ [] -> ok;
+ _ -> rabbit_log_feature_flags:warning(
+ "Feature flags: - list of previously disabled "
+ "feature flags now marked as such: ~p", [WereDisabled])
+ end,
+ %% Finally, if the new list of enabled feature flags is different
+ %% than the one on disk, we write the new list and re-initialize the
+ %% registry.
+ case RepairedEnabledFeatureNames of
+ EnabledFeatureNames ->
+ ok;
+ _ ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: write the repaired list of enabled feature "
+ "flags"),
+ WrittenToDisk = ok =:= try_to_write_enabled_feature_flags_list(
+ RepairedEnabledFeatureNames),
+ initialize_registry(
+ #{},
+ list_of_enabled_feature_flags_to_feature_states(
+ RepairedEnabledFeatureNames),
+ WrittenToDisk)
+ end.
+
+-spec refresh_feature_flags_after_app_load([atom()]) ->
+ ok | {error, any()} | no_return().
+
+refresh_feature_flags_after_app_load([]) ->
+ ok;
+refresh_feature_flags_after_app_load(Apps) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: new apps loaded: ~p -> refreshing feature flags",
+ [Apps]),
+
+ FeatureFlags0 = list(all),
+ FeatureFlags1 = query_supported_feature_flags(),
+
+ %% The following list contains all the feature flags this node
+ %% learned about only because remote nodes have them. Now, the
+ %% applications providing them are loaded locally as well.
+ %% Therefore, we may run their migration function in case the state
+ %% of this node needs it.
+ AlreadySupportedFeatureNames = maps:keys(
+ maps:filter(
+ fun(_, #{provided_by := App}) ->
+ lists:member(App, Apps)
+ end, FeatureFlags0)),
+ case AlreadySupportedFeatureNames of
+ [] ->
+ ok;
+ _ ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: new apps loaded: feature flags already "
+ "supported: ~p",
+ [lists:sort(AlreadySupportedFeatureNames)])
+ end,
+
+ %% The following list contains all the feature flags no nodes in the
+ %% cluster knew about before: this is the first time we see them in
+ %% this instance of the cluster. We need to register them on all
+ %% nodes.
+ NewSupportedFeatureFlags = maps:filter(
+ fun(FeatureName, _) ->
+ not maps:is_key(FeatureName,
+ FeatureFlags0)
+ end, FeatureFlags1),
+ case maps:keys(NewSupportedFeatureFlags) of
+ [] ->
+ ok;
+ NewSupportedFeatureNames ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: new apps loaded: new feature flags (unseen so "
+ "far): ~p ",
+ [lists:sort(NewSupportedFeatureNames)])
+ end,
+
+ case initialize_registry() of
+ ok ->
+ Ret = maybe_enable_locally_after_app_load(
+ AlreadySupportedFeatureNames),
+ case Ret of
+ ok ->
+ share_new_feature_flags_after_app_load(
+ NewSupportedFeatureFlags, ?TIMEOUT);
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+maybe_enable_locally_after_app_load([]) ->
+ ok;
+maybe_enable_locally_after_app_load([FeatureName | Rest]) ->
+ case is_enabled(FeatureName) of
+ true ->
+ case do_enable_locally(FeatureName) of
+ ok -> maybe_enable_locally_after_app_load(Rest);
+ Error -> Error
+ end;
+ false ->
+ maybe_enable_locally_after_app_load(Rest)
+ end.
+
+share_new_feature_flags_after_app_load(FeatureFlags, Timeout) ->
+ push_local_feature_flags_from_apps_unknown_remotely(
+ node(), FeatureFlags, Timeout).
+
+on_load() ->
+ %% The goal of this `on_load()` code server hook is to prevent this
+ %% module from being loaded in an already running RabbitMQ node if
+ %% the running version does not have the feature flags subsystem.
+ %%
+ %% This situation happens when an upgrade overwrites RabbitMQ files
+ %% with the node still running. This is the case with many packages:
+ %% files are updated on disk, then a post-install step takes care of
+ %% restarting the service.
+ %%
+ %% The problem is that if many nodes in a cluster are updated at the
+ %% same time, one node running the newer version might query feature
+ %% flags on an old node where this module is already available
+ %% (because files were already overwritten). This causes the query
+ %% to report an unexpected answer and the newer node to refuse to
+ %% start.
+ %%
+ %% However, when the module is executed outside of RabbitMQ (for
+ %% debugging purposes or in the context of EUnit for instance), we
+ %% want to allow the load. That's why we first check if RabbitMQ is
+ %% actually running.
+ case rabbit:is_running() of
+ true ->
+ %% RabbitMQ is running.
+ %%
+ %% Now we want to differentiate a pre-feature-flags node
+ %% from one having the subsystem.
+ %%
+ %% To do that, we verify if the `feature_flags_file`
+ %% application environment variable is defined. With a
+ %% feature-flags-enabled node, this application environment
+ %% variable is defined by rabbitmq-server(8).
+ case application:get_env(rabbit, feature_flags_file) of
+ {ok, _} ->
+ %% This is a feature-flags-enabled version. Loading
+ %% the module is permitted.
+ ok;
+ _ ->
+ %% This is a pre-feature-flags version. We deny the
+ %% load and report why, possibly specifying the
+ %% version of RabbitMQ.
+ Vsn = case application:get_key(rabbit, vsn) of
+ {ok, V} -> V;
+ undefined -> "unknown version"
+ end,
+ "Refusing to load '" ?MODULE_STRING "' on this "
+ "node. It appears to be running a pre-feature-flags "
+ "version of RabbitMQ (" ++ Vsn ++ "). This is fine: "
+ "a newer version of RabbitMQ was deployed on this "
+ "node, but it was not restarted yet. This warning "
+ "is probably caused by a remote node querying this "
+ "node for its feature flags."
+ end;
+ false ->
+ %% RabbitMQ is not running. Loading the module is permitted
+ %% because this Erlang node will never be queried for its
+ %% feature flags.
+ ok
+ end.
diff --git a/deps/rabbit/src/rabbit_ff_extra.erl b/deps/rabbit/src/rabbit_ff_extra.erl
new file mode 100644
index 0000000000..f0728d491e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_ff_extra.erl
@@ -0,0 +1,244 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% @copyright 2018-2020 VMware, Inc. or its affiliates.
+%%
+%% @doc
+%% This module provides extra functions which are not used by the
+%% feature flags subsystem's core functionality.
+
+-module(rabbit_ff_extra).
+
+-include_lib("stdout_formatter/include/stdout_formatter.hrl").
+
+-export([cli_info/0,
+ info/1,
+ info/2,
+ format_error/1]).
+
+-type cli_info() :: [cli_info_entry()].
+%% A list of feature flags properties, formatted for the RabbitMQ CLI.
+
+-type cli_info_entry() :: [{name, rabbit_feature_flags:feature_name()} |
+ {state, enabled | disabled | unavailable} |
+ {stability, rabbit_feature_flags:stability()} |
+ {provided_by, atom()} |
+ {desc, string()} |
+ {doc_url, string()}].
+%% A list of properties for a single feature flag, formatted for the
+%% RabbitMQ CLI.
+
+-type info_options() :: #{colors => boolean(),
+ lines => boolean(),
+ verbose => non_neg_integer()}.
+%% Options accepted by {@link info/1} and {@link info/2}.
+
+-export_type([info_options/0]).
+
+-spec cli_info() -> cli_info().
+%% @doc
+%% Returns a list of all feature flags properties.
+%%
+%% @returns the list of all feature flags properties.
+
+cli_info() ->
+ cli_info(rabbit_feature_flags:list(all)).
+
+-spec cli_info(rabbit_feature_flags:feature_flags()) -> cli_info().
+%% @doc
+%% Formats a map of feature flags and their properties into a list of
+%% feature flags properties as expected by the RabbitMQ CLI.
+%%
+%% @param FeatureFlags A map of feature flags.
+%% @returns the list of feature flags properties, created from the map
+%% specified in arguments.
+
+cli_info(FeatureFlags) ->
+ lists:foldr(
+ fun(FeatureName, Acc) ->
+ FeatureProps = maps:get(FeatureName, FeatureFlags),
+ State = rabbit_feature_flags:get_state(FeatureName),
+ Stability = rabbit_feature_flags:get_stability(FeatureProps),
+ App = maps:get(provided_by, FeatureProps),
+ Desc = maps:get(desc, FeatureProps, ""),
+ DocUrl = maps:get(doc_url, FeatureProps, ""),
+ FFInfo = [{name, FeatureName},
+ {desc, unicode:characters_to_binary(Desc)},
+ {doc_url, unicode:characters_to_binary(DocUrl)},
+ {state, State},
+ {stability, Stability},
+ {provided_by, App}],
+ [FFInfo | Acc]
+ end, [], lists:sort(maps:keys(FeatureFlags))).
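+
+%% For illustration, an entry in the returned list has the following shape
+%% (the values are hypothetical):
+%%
+%%   [{name, quorum_queue},
+%%    {desc, <<"...">>},
+%%    {doc_url, <<"...">>},
+%%    {state, enabled},
+%%    {stability, stable},
+%%    {provided_by, rabbit}]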
+
+-spec info(info_options()) -> ok.
+%% @doc
+%% Displays an array of all supported feature flags and their properties
+%% on `stdout'.
+%%
+%% @param Options Options to tune what is displayed and how.
+
+info(Options) ->
+ %% Two tables: one for stable feature flags, one for experimental ones.
+ StableFF = rabbit_feature_flags:list(all, stable),
+ case maps:size(StableFF) of
+ 0 ->
+ ok;
+ _ ->
+ stdout_formatter:display(
+ #paragraph{content = "\n## Stable feature flags:",
+ props = #{bold => true}}),
+ info(StableFF, Options)
+ end,
+ ExpFF = rabbit_feature_flags:list(all, experimental),
+ case maps:size(ExpFF) of
+ 0 ->
+ ok;
+ _ ->
+ stdout_formatter:display(
+ #paragraph{content = "\n## Experimental feature flags:",
+ props = #{bold => true}}),
+ info(ExpFF, Options)
+ end,
+ case maps:size(StableFF) + maps:size(ExpFF) of
+ 0 -> ok;
+ _ -> state_legend(Options)
+ end.
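+
+%% A possible invocation from an Erlang shell, assuming plain ASCII output
+%% without colors is wanted:
+%%
+%%   1> rabbit_ff_extra:info(#{colors => false, lines => false, verbose => 0}).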
+
+-spec info(rabbit_feature_flags:feature_flags(), info_options()) -> ok.
+%% @doc
+%% Displays an array of feature flags and their properties on `stdout',
+%% based on the specified feature flags map.
+%%
+%% @param FeatureFlags Map of the feature flags to display.
+%% @param Options Options to tune what is displayed and how.
+
+info(FeatureFlags, Options) ->
+ Verbose = maps:get(verbose, Options, 0),
+ UseColors = use_colors(Options),
+ UseLines = use_lines(Options),
+ Title = case UseColors of
+ true -> #{title => true};
+ false -> #{}
+ end,
+ Bold = case UseColors of
+ true -> #{bold => true};
+ false -> #{}
+ end,
+ {Green, Yellow, Red} = case UseColors of
+ true ->
+ {#{fg => green},
+ #{fg => yellow},
+ #{bold => true,
+ bg => red}};
+ false ->
+ {#{}, #{}, #{}}
+ end,
+ Border = case UseLines of
+ true -> #{border_drawing => ansi};
+ false -> #{border_drawing => ascii}
+ end,
+ %% Table columns:
+ %% | Name | State | Provided by | Description
+ %%
+ %% where:
+ %% State = Enabled | Disabled | Unavailable (if a node doesn't
+ %% support it).
+ TableHeader = #row{cells = ["Name",
+ "State",
+ "Provided",
+ "Description"],
+ props = Title},
+ Nodes = lists:sort([node() | rabbit_feature_flags:remote_nodes()]),
+ Rows = lists:map(
+ fun(FeatureName) ->
+ FeatureProps = maps:get(FeatureName, FeatureFlags),
+ State0 = rabbit_feature_flags:get_state(FeatureName),
+ {State, Color} = case State0 of
+ enabled ->
+ {"Enabled", Green};
+ disabled ->
+ {"Disabled", Yellow};
+ unavailable ->
+ {"Unavailable", Red}
+ end,
+ App = maps:get(provided_by, FeatureProps),
+ Desc = maps:get(desc, FeatureProps, ""),
+ VFun = fun(Node) ->
+ Supported =
+ rabbit_feature_flags:does_node_support(
+ Node, [FeatureName], 60000),
+ {Label, LabelColor} =
+ case Supported of
+ true -> {"supported", #{}};
+ false -> {"unsupported", Red}
+ end,
+ #paragraph{content =
+ [rabbit_misc:format(" ~s: ",
+ [Node]),
+ #paragraph{content = Label,
+ props = LabelColor}]}
+ end,
+ ExtraLines = if
+ Verbose > 0 ->
+ NodesList = lists:join(
+ "\n",
+ lists:map(
+ VFun, Nodes)),
+ ["\n\n",
+ "Per-node support level:\n"
+ | NodesList];
+ true ->
+ []
+ end,
+ [#paragraph{content = FeatureName,
+ props = Bold},
+ #paragraph{content = State,
+ props = Color},
+ #paragraph{content = App},
+ #paragraph{content = [Desc | ExtraLines]}]
+ end, lists:sort(maps:keys(FeatureFlags))),
+ io:format("~n", []),
+ stdout_formatter:display(#table{rows = [TableHeader | Rows],
+ props = Border#{cell_padding => {0, 1}}}).
+
+use_colors(Options) ->
+ maps:get(colors, Options, true).
+
+use_lines(Options) ->
+ maps:get(lines, Options, true).
+
+state_legend(Options) ->
+ UseColors = use_colors(Options),
+ {Green, Yellow, Red} = case UseColors of
+ true ->
+ {#{fg => green},
+ #{fg => yellow},
+ #{bold => true,
+ bg => red}};
+ false ->
+ {#{}, #{}, #{}}
+ end,
+ Enabled = #paragraph{content = "Enabled", props = Green},
+ Disabled = #paragraph{content = "Disabled", props = Yellow},
+ Unavailable = #paragraph{content = "Unavailable", props = Red},
+ stdout_formatter:display(
+ #paragraph{
+ content =
+ ["\n",
+ "Possible states:\n",
+ " ", Enabled, ": The feature flag is enabled on all nodes\n",
+ " ", Disabled, ": The feature flag is disabled on all nodes\n",
+ " ", Unavailable, ": The feature flag cannot be enabled because"
+ " one or more nodes do not support it\n"]}).
+
+-spec format_error(any()) -> string().
+%% @doc
+%% Formats the error reason term so it can be presented to human beings.
+%%
+%% @param Reason The term in the `{error, Reason}' tuple.
+%% @returns the formatted error reason.
+
+format_error(Reason) ->
+ rabbit_misc:format("~p", [Reason]).
diff --git a/deps/rabbit/src/rabbit_ff_registry.erl b/deps/rabbit/src/rabbit_ff_registry.erl
new file mode 100644
index 0000000000..372971f949
--- /dev/null
+++ b/deps/rabbit/src/rabbit_ff_registry.erl
@@ -0,0 +1,189 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @author The RabbitMQ team
+%% @copyright 2018-2020 VMware, Inc. or its affiliates.
+%%
+%% @doc
+%% This module exposes the API of the {@link rabbit_feature_flags}
+%% registry. The feature flags registry is an Erlang module, compiled at
+%% runtime, storing all the information about feature flags: which are
+%% supported, which are enabled, etc.
+%%
+%% Because it is compiled at runtime, the initial source code is mostly
+%% an API reference. What the initial module does is merely ask {@link
+%% rabbit_feature_flags} to generate the real registry.
+
+-module(rabbit_ff_registry).
+
+-export([get/1,
+ list/1,
+ states/0,
+ is_supported/1,
+ is_enabled/1,
+ is_registry_initialized/0,
+ is_registry_written_to_disk/0]).
+
+-ifdef(TEST).
+-on_load(on_load/0).
+-endif.
+
+-spec get(rabbit_feature_flags:feature_name()) ->
+ rabbit_feature_flags:feature_props() | undefined.
+%% @doc
+%% Returns the properties of a feature flag.
+%%
+%% Only the information stored in the local registry is used to answer
+%% this call.
+%%
+%% @param FeatureName The name of the feature flag.
+%% @returns the properties of the specified feature flag.
+
+get(FeatureName) ->
+ rabbit_feature_flags:initialize_registry(),
+ %% Initially, is_registry_initialized/0 always returns `false`
+ %% and this ?MODULE:get(FeatureName) is always called. The case
+ %% statement is here to please Dialyzer.
+ case is_registry_initialized() of
+ false -> ?MODULE:get(FeatureName);
+ true -> undefined
+ end.
+
+-spec list(all | enabled | disabled) -> rabbit_feature_flags:feature_flags().
+%% @doc
+%% Lists all, enabled or disabled feature flags, depending on the argument.
+%%
+%% Only the information stored in the local registry is used to answer
+%% this call.
+%%
+%% @param Which The group of feature flags to return: `all', `enabled' or
+%% `disabled'.
+%% @returns A map of selected feature flags.
+
+list(Which) ->
+ rabbit_feature_flags:initialize_registry(),
+ %% See get/1 for an explanation of the case statement below.
+ case is_registry_initialized() of
+ false -> ?MODULE:list(Which);
+ true -> #{}
+ end.
+
+-spec states() -> rabbit_feature_flags:feature_states().
+%% @doc
+%% Returns the states of supported feature flags.
+%%
+%% Only the information stored in the local registry is used to answer
+%% this call.
+%%
+%% @returns A map of feature flag states.
+
+states() ->
+ rabbit_feature_flags:initialize_registry(),
+ %% See get/1 for an explanation of the case statement below.
+ case is_registry_initialized() of
+ false -> ?MODULE:states();
+ true -> #{}
+ end.
+
+-spec is_supported(rabbit_feature_flags:feature_name()) -> boolean().
+%% @doc
+%% Returns if a feature flag is supported.
+%%
+%% Only the information stored in the local registry is used to answer
+%% this call.
+%%
+%% @param FeatureName The name of the feature flag to be checked.
+%% @returns `true' if the feature flag is supported, or `false'
+%% otherwise.
+
+is_supported(FeatureName) ->
+ rabbit_feature_flags:initialize_registry(),
+ %% See get/1 for an explanation of the case statement below.
+ case is_registry_initialized() of
+ false -> ?MODULE:is_supported(FeatureName);
+ true -> false
+ end.
+
+-spec is_enabled(rabbit_feature_flags:feature_name()) -> boolean() | state_changing.
+%% @doc
+%% Returns if a feature flag is enabled or if its state is changing.
+%%
+%% Only the information stored in the local registry is used to answer
+%% this call.
+%%
+%% @param FeatureName The name of the feature flag to be checked.
+%% @returns `true' if the feature flag is enabled, `state_changing' if
+%% its state is transient, or `false' otherwise.
+
+is_enabled(FeatureName) ->
+ rabbit_feature_flags:initialize_registry(),
+ %% See get/1 for an explanation of the case statement below.
+ case is_registry_initialized() of
+ false -> ?MODULE:is_enabled(FeatureName);
+ true -> false
+ end.
+
+-spec is_registry_initialized() -> boolean().
+%% @doc
+%% Indicates if the registry is initialized.
+%%
+%% The registry is considered initialized once the initial Erlang module
+%% was replaced by the copy compiled at runtime.
+%%
+%% @returns `true' when the module is the one compiled at runtime,
+%% `false' when the module is the initial one compiled from RabbitMQ
+%% source code.
+
+is_registry_initialized() ->
+ always_return_false().
+
+-spec is_registry_written_to_disk() -> boolean().
+%% @doc
+%% Indicates if the feature flags state was successfully persisted to disk.
+%%
+%% Note that on startup, {@link rabbit_feature_flags} tries to determine
+%% the state of each supported feature flag, regardless of the
+%% information on disk, to ensure maximum consistency. However, this can
+%% be done for feature flags supporting it only.
+%%
+%% @returns `true' if the state was successfully written to disk and
+%% the registry can be initialized from that during the next RabbitMQ
+%% startup, `false' if the write failed and the node might lose feature
+%% flags state on restart.
+
+is_registry_written_to_disk() ->
+ always_return_true().
+
+always_return_true() ->
+ %% This function is here to trick Dialyzer. We want some functions
+ %% in this initial on-disk registry to always return `true` or
+ %% `false`. However the generated registry will return actual
+ %% booleans. The `-spec()` correctly advertises a return type of
+ %% `boolean()`. But in the meantime, Dialyzer only knows about this
+ %% copy which, without the trick below, would always return either
+ %% `true` (e.g. in is_registry_written_to_disk/0) or `false` (e.g.
+ %% is_registry_initialized/0). This obviously causes some warnings
+ %% where the registry functions are used: Dialyzer believes that
+ %% e.g. matching the return value of is_registry_initialized/0
+ %% against `true` will never succeed.
+ %%
+ %% That's why this function makes a call whose result we know (but
+ %% Dialyzer does not) to "create" that hard-coded `true` return
+ %% value.
+ erlang:get({?MODULE, always_undefined}) =:= undefined.
+
+always_return_false() ->
+ not always_return_true().
+
+-ifdef(TEST).
+on_load() ->
+ _ = (catch rabbit_log_feature_flags:debug(
+ "Feature flags: Loading initial (uninitialized) registry "
+ "module (~p)",
+ [self()])),
+ ok.
+-endif.
diff --git a/deps/rabbit/src/rabbit_fhc_helpers.erl b/deps/rabbit/src/rabbit_fhc_helpers.erl
new file mode 100644
index 0000000000..d310e84008
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fhc_helpers.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_fhc_helpers).
+
+-export([clear_read_cache/0]).
+
+-include("amqqueue.hrl").
+
+clear_read_cache() ->
+ case application:get_env(rabbit, fhc_read_buffering) of
+ {ok, true} ->
+ file_handle_cache:clear_read_cache(),
+ clear_vhost_read_cache(rabbit_vhost:list_names());
+ _ -> %% undefined or {ok, false}
+ ok
+ end.
+
+clear_vhost_read_cache([]) ->
+ ok;
+clear_vhost_read_cache([VHost | Rest]) ->
+ clear_queue_read_cache(rabbit_amqqueue:list(VHost)),
+ clear_vhost_read_cache(Rest).
+
+clear_queue_read_cache([]) ->
+ ok;
+clear_queue_read_cache([Q | Rest]) when ?is_amqqueue(Q) ->
+ MPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ %% Limit the action to the current node.
+ Pids = [P || P <- [MPid | SPids], node(P) =:= node()],
+ %% This function is executed in the context of the backing queue
+ %% process because the read buffer is stored in the process
+ %% dictionary.
+ Fun = fun(_, State) ->
+ _ = file_handle_cache:clear_process_read_cache(),
+ State
+ end,
+ [rabbit_amqqueue:run_backing_queue(Pid, rabbit_variable_queue, Fun)
+ || Pid <- Pids],
+ clear_queue_read_cache(Rest).
diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl
new file mode 100644
index 0000000000..51acfffd0d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo.erl
@@ -0,0 +1,2124 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_fifo).
+
+-behaviour(ra_machine).
+
+-compile(inline_list_funcs).
+-compile(inline).
+-compile({no_auto_import, [apply/3]}).
+
+-include("rabbit_fifo.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([
+ init/1,
+ apply/3,
+ state_enter/2,
+ tick/2,
+ overview/1,
+ get_checked_out/4,
+ %% versioning
+ version/0,
+ which_module/1,
+ %% aux
+ init_aux/1,
+ handle_aux/6,
+ % queries
+ query_messages_ready/1,
+ query_messages_checked_out/1,
+ query_messages_total/1,
+ query_processes/1,
+ query_ra_indexes/1,
+ query_consumer_count/1,
+ query_consumers/1,
+ query_stat/1,
+ query_single_active_consumer/1,
+ query_in_memory_usage/1,
+ query_peek/2,
+ usage/1,
+
+ zero/1,
+
+ %% misc
+ dehydrate_state/1,
+ normalize/1,
+
+ %% protocol helpers
+ make_enqueue/3,
+ make_register_enqueuer/1,
+ make_checkout/3,
+ make_settle/2,
+ make_return/2,
+ make_discard/2,
+ make_credit/4,
+ make_purge/0,
+ make_purge_nodes/1,
+ make_update_config/1,
+ make_garbage_collection/0
+ ]).
+
+%% command records representing all the protocol actions that are supported
+-record(enqueue, {pid :: option(pid()),
+ seq :: option(msg_seqno()),
+ msg :: raw_msg()}).
+-record(register_enqueuer, {pid :: pid()}).
+-record(checkout, {consumer_id :: consumer_id(),
+ spec :: checkout_spec(),
+ meta :: consumer_meta()}).
+-record(settle, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(return, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(discard, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(credit, {consumer_id :: consumer_id(),
+ credit :: non_neg_integer(),
+ delivery_count :: non_neg_integer(),
+ drain :: boolean()}).
+-record(purge, {}).
+-record(purge_nodes, {nodes :: [node()]}).
+-record(update_config, {config :: config()}).
+-record(garbage_collection, {}).
+
+-opaque protocol() ::
+ #enqueue{} |
+ #register_enqueuer{} |
+ #checkout{} |
+ #settle{} |
+ #return{} |
+ #discard{} |
+ #credit{} |
+ #purge{} |
+ #purge_nodes{} |
+ #update_config{} |
+ #garbage_collection{}.
+
+-type command() :: protocol() | ra_machine:builtin_command().
+%% all the command types supported by ra fifo
+
+-type client_msg() :: delivery().
+%% the messages `rabbit_fifo' can send to consumers.
+
+-opaque state() :: #?MODULE{}.
+
+-export_type([protocol/0,
+ delivery/0,
+ command/0,
+ credit_mode/0,
+ consumer_tag/0,
+ consumer_meta/0,
+ consumer_id/0,
+ client_msg/0,
+ msg/0,
+ msg_id/0,
+ msg_seqno/0,
+ delivery_msg/0,
+ state/0,
+ config/0]).
+
+-spec init(config()) -> state().
+init(#{name := Name,
+ queue_resource := Resource} = Conf) ->
+ update_config(Conf, #?MODULE{cfg = #cfg{name = Name,
+ resource = Resource}}).
+
+update_config(Conf, State) ->
+ DLH = maps:get(dead_letter_handler, Conf, undefined),
+ BLH = maps:get(become_leader_handler, Conf, undefined),
+ RCI = maps:get(release_cursor_interval, Conf, ?RELEASE_CURSOR_EVERY),
+ Overflow = maps:get(overflow_strategy, Conf, drop_head),
+ MaxLength = maps:get(max_length, Conf, undefined),
+ MaxBytes = maps:get(max_bytes, Conf, undefined),
+ MaxMemoryLength = maps:get(max_in_memory_length, Conf, undefined),
+ MaxMemoryBytes = maps:get(max_in_memory_bytes, Conf, undefined),
+ DeliveryLimit = maps:get(delivery_limit, Conf, undefined),
+ Expires = maps:get(expires, Conf, undefined),
+ ConsumerStrategy = case maps:get(single_active_consumer_on, Conf, false) of
+ true ->
+ single_active;
+ false ->
+ competing
+ end,
+ Cfg = State#?MODULE.cfg,
+ RCISpec = {RCI, RCI},
+
+ LastActive = maps:get(created, Conf, undefined),
+ State#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCISpec,
+ dead_letter_handler = DLH,
+ become_leader_handler = BLH,
+ overflow_strategy = Overflow,
+ max_length = MaxLength,
+ max_bytes = MaxBytes,
+ max_in_memory_length = MaxMemoryLength,
+ max_in_memory_bytes = MaxMemoryBytes,
+ consumer_strategy = ConsumerStrategy,
+ delivery_limit = DeliveryLimit,
+ expires = Expires},
+ last_active = LastActive}.
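+
+%% For illustration, a configuration map passed to init/1 could look like
+%% this (the values are hypothetical; `Resource' is the queue resource):
+%%
+%%   #{name => my_qq,
+%%     queue_resource => Resource,
+%%     release_cursor_interval => 64000,
+%%     overflow_strategy => drop_head,
+%%     single_active_consumer_on => false}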
+
+zero(_) ->
+ 0.
+
+% msg_ids are scoped per consumer
+% ra_indexes holds all raft indexes for enqueues currently on queue
+-spec apply(ra_machine:command_meta_data(), command(), state()) ->
+ {state(), Reply :: term(), ra_machine:effects()} |
+ {state(), Reply :: term()}.
+apply(Meta, #enqueue{pid = From, seq = Seq,
+ msg = RawMsg}, State00) ->
+ apply_enqueue(Meta, From, Seq, RawMsg, State00);
+apply(_Meta, #register_enqueuer{pid = Pid},
+ #?MODULE{enqueuers = Enqueuers0,
+ cfg = #cfg{overflow_strategy = Overflow}} = State0) ->
+
+ State = case maps:is_key(Pid, Enqueuers0) of
+ true ->
+ %% if the enqueuer already exists, just echo the overflow state
+ State0;
+ false ->
+ State0#?MODULE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}}
+ end,
+ Res = case is_over_limit(State) of
+ true when Overflow == reject_publish ->
+ reject_publish;
+ _ ->
+ ok
+ end,
+ {State, Res, [{monitor, process, Pid}]};
+apply(Meta,
+ #settle{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?MODULE{consumers = Cons0} = State) ->
+ case Cons0 of
+ #{ConsumerId := Con0} ->
+ % need to increment metrics before completing as any snapshot
+ % states taken need to include them
+ complete_and_checkout(Meta, MsgIds, ConsumerId,
+ Con0, [], State);
+ _ ->
+ {State, ok}
+
+ end;
+apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?MODULE{consumers = Cons0} = State0) ->
+ case Cons0 of
+ #{ConsumerId := Con0} ->
+ Discarded = maps:with(MsgIds, Con0#consumer.checked_out),
+ Effects = dead_letter_effects(rejected, Discarded, State0, []),
+ complete_and_checkout(Meta, MsgIds, ConsumerId, Con0,
+ Effects, State0);
+ _ ->
+ {State0, ok}
+ end;
+apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?MODULE{consumers = Cons0} = State) ->
+ case Cons0 of
+ #{ConsumerId := #consumer{checked_out = Checked0}} ->
+ Returned = maps:with(MsgIds, Checked0),
+ return(Meta, ConsumerId, Returned, [], State);
+ _ ->
+ {State, ok}
+ end;
+apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt,
+ drain = Drain, consumer_id = ConsumerId},
+ #?MODULE{consumers = Cons0,
+ service_queue = ServiceQueue0,
+ waiting_consumers = Waiting0} = State0) ->
+ case Cons0 of
+ #{ConsumerId := #consumer{delivery_count = DelCnt} = Con0} ->
+ %% this can go below 0 when credit is reduced
+ C = max(0, RemoteDelCnt + NewCredit - DelCnt),
+ %% grant the credit
+ Con1 = Con0#consumer{credit = C},
+ ServiceQueue = maybe_queue_consumer(ConsumerId, Con1,
+ ServiceQueue0),
+ Cons = maps:put(ConsumerId, Con1, Cons0),
+ {State1, ok, Effects} =
+ checkout(Meta, State0,
+ State0#?MODULE{service_queue = ServiceQueue,
+ consumers = Cons}, []),
+ Response = {send_credit_reply, messages_ready(State1)},
+ %% by this point all checkouts for the updated credit value
+ %% should be processed so we can evaluate the drain
+ case Drain of
+ false ->
+ %% just return the result of the checkout
+ {State1, Response, Effects};
+ true ->
+ Con = #consumer{credit = PostCred} =
+ maps:get(ConsumerId, State1#?MODULE.consumers),
+ %% add the outstanding credit to the delivery count
+ DeliveryCount = Con#consumer.delivery_count + PostCred,
+ Consumers = maps:put(ConsumerId,
+ Con#consumer{delivery_count = DeliveryCount,
+ credit = 0},
+ State1#?MODULE.consumers),
+ Drained = Con#consumer.credit,
+ {CTag, _} = ConsumerId,
+ {State1#?MODULE{consumers = Consumers},
+ %% returning a multi response with two client actions
+ %% for the channel to execute
+ {multi, [Response, {send_drained, {CTag, Drained}}]},
+ Effects}
+ end;
+ _ when Waiting0 /= [] ->
+ %% there are waiting consumers
+ case lists:keytake(ConsumerId, 1, Waiting0) of
+ {value, {_, Con0 = #consumer{delivery_count = DelCnt}}, Waiting} ->
+ %% the consumer is a waiting one
+ %% grant the credit
+ C = max(0, RemoteDelCnt + NewCredit - DelCnt),
+ Con = Con0#consumer{credit = C},
+ State = State0#?MODULE{waiting_consumers =
+ [{ConsumerId, Con} | Waiting]},
+ {State, {send_credit_reply, messages_ready(State)}};
+ false ->
+ {State0, ok}
+ end;
+ _ ->
+ %% credit for unknown consumer - just ignore
+ {State0, ok}
+ end;
+apply(_, #checkout{spec = {dequeue, _}},
+ #?MODULE{cfg = #cfg{consumer_strategy = single_active}} = State0) ->
+ {State0, {error, {unsupported, single_active_consumer}}};
+apply(#{index := Index,
+ system_time := Ts,
+ from := From} = Meta, #checkout{spec = {dequeue, Settlement},
+ meta = ConsumerMeta,
+ consumer_id = ConsumerId},
+ #?MODULE{consumers = Consumers} = State00) ->
+ %% dequeue always updates last_active
+ State0 = State00#?MODULE{last_active = Ts},
+ %% all dequeue operations result in keeping the queue from expiring
+ Exists = maps:is_key(ConsumerId, Consumers),
+ case messages_ready(State0) of
+ 0 ->
+ {State0, {dequeue, empty}};
+ _ when Exists ->
+ %% a dequeue using the same consumer_id isn't possible at this point
+ {State0, {dequeue, empty}};
+ Ready ->
+ State1 = update_consumer(ConsumerId, ConsumerMeta,
+ {once, 1, simple_prefetch}, 0,
+ State0),
+ {success, _, MsgId, Msg, State2} = checkout_one(Meta, State1),
+ {State4, Effects1} = case Settlement of
+ unsettled ->
+ {_, Pid} = ConsumerId,
+ {State2, [{monitor, process, Pid}]};
+ settled ->
+ %% immediately settle the checkout
+ {State3, _, Effects0} =
+ apply(Meta, make_settle(ConsumerId, [MsgId]),
+ State2),
+ {State3, Effects0}
+ end,
+ {Reply, Effects2} =
+ case Msg of
+ {RaftIdx, {Header, empty}} ->
+ %% TODO add here new log effect with reply
+ {'$ra_no_reply',
+ [reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From) |
+ Effects1]};
+ _ ->
+ {{dequeue, {MsgId, Msg}, Ready-1}, Effects1}
+
+ end,
+
+ case evaluate_limit(Index, false, State0, State4, Effects2) of
+ {State, true, Effects} ->
+ update_smallest_raft_index(Index, Reply, State, Effects);
+ {State, false, Effects} ->
+ {State, Reply, Effects}
+ end
+ end;
+apply(Meta, #checkout{spec = cancel, consumer_id = ConsumerId}, State0) ->
+ {State, Effects} = cancel_consumer(Meta, ConsumerId, State0, [],
+ consumer_cancel),
+ checkout(Meta, State0, State, Effects);
+apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta,
+ consumer_id = {_, Pid} = ConsumerId},
+ State0) ->
+ Priority = get_priority_from_args(ConsumerMeta),
+ State1 = update_consumer(ConsumerId, ConsumerMeta, Spec, Priority, State0),
+ checkout(Meta, State0, State1, [{monitor, process, Pid}]);
+apply(#{index := Index}, #purge{},
+ #?MODULE{ra_indexes = Indexes0,
+ returns = Returns,
+ messages = Messages} = State0) ->
+ Total = messages_ready(State0),
+ Indexes1 = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0,
+ [I || {_, {I, _}} <- lqueue:to_list(Messages)]),
+ Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes1,
+ [I || {_, {I, _}} <- lqueue:to_list(Returns)]),
+
+ State1 = State0#?MODULE{ra_indexes = Indexes,
+ messages = lqueue:new(),
+ returns = lqueue:new(),
+ msg_bytes_enqueue = 0,
+ prefix_msgs = {0, [], 0, []},
+ msg_bytes_in_memory = 0,
+ msgs_ready_in_memory = 0},
+ Effects0 = [garbage_collection],
+ Reply = {purge, Total},
+ {State, _, Effects} = evaluate_limit(Index, false, State0,
+ State1, Effects0),
+ update_smallest_raft_index(Index, Reply, State, Effects);
+apply(_Meta, #garbage_collection{}, State) ->
+ {State, ok, [{aux, garbage_collection}]};
+apply(#{system_time := Ts} = Meta, {down, Pid, noconnection},
+ #?MODULE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = Waiting0,
+ enqueuers = Enqs0} = State0) ->
+ Node = node(Pid),
+ %% if the pid refers to an active or cancelled consumer,
+ %% mark it as suspected and return it to the waiting queue
+ {State1, Effects0} =
+ maps:fold(fun({_, P} = Cid, C0, {S0, E0})
+ when node(P) =:= Node ->
+ %% the consumer should be returned to waiting
+ %% and checked out messages should be returned
+ Effs = consumer_update_active_effects(
+ S0, Cid, C0, false, suspected_down, E0),
+ Checked = C0#consumer.checked_out,
+ Credit = increase_credit(C0, maps:size(Checked)),
+ {St, Effs1} = return_all(Meta, S0, Effs,
+ Cid, C0#consumer{credit = Credit}),
+ %% if the consumer was cancelled there is a chance it got
+ %% removed when returning hence we need to be defensive here
+ Waiting = case St#?MODULE.consumers of
+ #{Cid := C} ->
+ Waiting0 ++ [{Cid, C}];
+ _ ->
+ Waiting0
+ end,
+ {St#?MODULE{consumers = maps:remove(Cid, St#?MODULE.consumers),
+ waiting_consumers = Waiting,
+ last_active = Ts},
+ Effs1};
+ (_, _, S) ->
+ S
+ end, {State0, []}, Cons0),
+ WaitingConsumers = update_waiting_consumer_status(Node, State1,
+ suspected_down),
+
+ %% select a new consumer from the waiting queue and run a checkout
+ State2 = State1#?MODULE{waiting_consumers = WaitingConsumers},
+ {State, Effects1} = activate_next_consumer(State2, Effects0),
+
+ %% mark any enqueuers on this node as suspected_down
+ Enqs = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = suspected_down};
+ (_, E) -> E
+ end, Enqs0),
+ Effects = [{monitor, node, Node} | Effects1],
+ checkout(Meta, State0, State#?MODULE{enqueuers = Enqs}, Effects);
+apply(#{system_time := Ts} = Meta, {down, Pid, noconnection},
+ #?MODULE{consumers = Cons0,
+ enqueuers = Enqs0} = State0) ->
+ %% A node has been disconnected. This doesn't necessarily mean that
+ %% any processes on this node are down, they _may_ come back so here
+ %% we just mark them as suspected (effectively deactivated)
+ %% and return all checked out messages to the main queue for delivery to any
+ %% live consumers
+ %%
+ %% all pids for the disconnected node will be marked as suspected not just
+ %% the one we got the `down' command for
+ Node = node(Pid),
+
+ {State, Effects1} =
+ maps:fold(
+ fun({_, P} = Cid, #consumer{checked_out = Checked0,
+ status = up} = C0,
+ {St0, Eff}) when node(P) =:= Node ->
+ Credit = increase_credit(C0, map_size(Checked0)),
+ C = C0#consumer{status = suspected_down,
+ credit = Credit},
+ {St, Eff0} = return_all(Meta, St0, Eff, Cid, C),
+ Eff1 = consumer_update_active_effects(St, Cid, C, false,
+ suspected_down, Eff0),
+ {St, Eff1};
+ (_, _, {St, Eff}) ->
+ {St, Eff}
+ end, {State0, []}, Cons0),
+ Enqs = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = suspected_down};
+ (_, E) -> E
+ end, Enqs0),
+
+ % Monitor the node so that we can "unsuspect" these processes when the node
+ % comes back, then re-issue all monitors and discover the final fate of
+ % these processes
+ Effects = case maps:size(State#?MODULE.consumers) of
+ 0 ->
+ [{aux, inactive}, {monitor, node, Node}];
+ _ ->
+ [{monitor, node, Node}]
+ end ++ Effects1,
+ checkout(Meta, State0, State#?MODULE{enqueuers = Enqs,
+ last_active = Ts}, Effects);
+apply(Meta, {down, Pid, _Info}, State0) ->
+ {State, Effects} = handle_down(Meta, Pid, State0),
+ checkout(Meta, State0, State, Effects);
+apply(Meta, {nodeup, Node}, #?MODULE{consumers = Cons0,
+ enqueuers = Enqs0,
+ service_queue = _SQ0} = State0) ->
+ %% A node we are monitoring has come back.
+ %% If we have suspected any processes of being
+ %% down we should now re-issue the monitors for them to detect if they're
+ %% actually down or not
+ Monitors = [{monitor, process, P}
+ || P <- suspected_pids_for(Node, State0)],
+
+ Enqs1 = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = up};
+ (_, E) -> E
+ end, Enqs0),
+ ConsumerUpdateActiveFun = consumer_active_flag_update_function(State0),
+ %% mark all consumers as up
+ {State1, Effects1} =
+ maps:fold(fun({_, P} = ConsumerId, C, {SAcc, EAcc})
+ when (node(P) =:= Node) and
+ (C#consumer.status =/= cancelled) ->
+ EAcc1 = ConsumerUpdateActiveFun(SAcc, ConsumerId,
+ C, true, up, EAcc),
+ {update_or_remove_sub(Meta, ConsumerId,
+ C#consumer{status = up},
+ SAcc), EAcc1};
+ (_, _, Acc) ->
+ Acc
+ end, {State0, Monitors}, Cons0),
+ Waiting = update_waiting_consumer_status(Node, State1, up),
+ State2 = State1#?MODULE{
+ enqueuers = Enqs1,
+ waiting_consumers = Waiting},
+ {State, Effects} = activate_next_consumer(State2, Effects1),
+ checkout(Meta, State0, State, Effects);
+apply(_, {nodedown, _Node}, State) ->
+ {State, ok};
+apply(Meta, #purge_nodes{nodes = Nodes}, State0) ->
+ {State, Effects} = lists:foldl(fun(Node, {S, E}) ->
+ purge_node(Meta, Node, S, E)
+ end, {State0, []}, Nodes),
+ {State, ok, Effects};
+apply(Meta, #update_config{config = Conf}, State) ->
+ checkout(Meta, State, update_config(Conf, State), []);
+apply(_Meta, {machine_version, 0, 1}, V0State) ->
+ State = convert_v0_to_v1(V0State),
+ {State, ok, []}.
+
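+%% converts a machine version 0 state to a version 1 state: the messages
+%% map becomes an lqueue, enqueuers and consumers are re-mapped onto the v1
+%% records (consumers gain a priority field defaulting to 0) and the plain
+%% service queue becomes a priority queue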
+convert_v0_to_v1(V0State0) ->
+ V0State = rabbit_fifo_v0:normalize_for_v1(V0State0),
+ V0Msgs = rabbit_fifo_v0:get_field(messages, V0State),
+ V1Msgs = lqueue:from_list(lists:sort(maps:to_list(V0Msgs))),
+ V0Enqs = rabbit_fifo_v0:get_field(enqueuers, V0State),
+ V1Enqs = maps:map(
+ fun (_EPid, E) ->
+ #enqueuer{next_seqno = element(2, E),
+ pending = element(3, E),
+ status = element(4, E)}
+ end, V0Enqs),
+ V0Cons = rabbit_fifo_v0:get_field(consumers, V0State),
+ V1Cons = maps:map(
+ fun (_CId, C0) ->
+ %% add the priority field
+ list_to_tuple(tuple_to_list(C0) ++ [0])
+ end, V0Cons),
+ V0SQ = rabbit_fifo_v0:get_field(service_queue, V0State),
+ V1SQ = priority_queue:from_list(queue:to_list(V0SQ)),
+ Cfg = #cfg{name = rabbit_fifo_v0:get_cfg_field(name, V0State),
+ resource = rabbit_fifo_v0:get_cfg_field(resource, V0State),
+ release_cursor_interval = rabbit_fifo_v0:get_cfg_field(release_cursor_interval, V0State),
+ dead_letter_handler = rabbit_fifo_v0:get_cfg_field(dead_letter_handler, V0State),
+ become_leader_handler = rabbit_fifo_v0:get_cfg_field(become_leader_handler, V0State),
+ %% TODO: what if policy enabling reject_publish was applied before conversion?
+ overflow_strategy = drop_head,
+ max_length = rabbit_fifo_v0:get_cfg_field(max_length, V0State),
+ max_bytes = rabbit_fifo_v0:get_cfg_field(max_bytes, V0State),
+ consumer_strategy = rabbit_fifo_v0:get_cfg_field(consumer_strategy, V0State),
+ delivery_limit = rabbit_fifo_v0:get_cfg_field(delivery_limit, V0State),
+ max_in_memory_length = rabbit_fifo_v0:get_cfg_field(max_in_memory_length, V0State),
+ max_in_memory_bytes = rabbit_fifo_v0:get_cfg_field(max_in_memory_bytes, V0State)
+ },
+
+ #?MODULE{cfg = Cfg,
+ messages = V1Msgs,
+ next_msg_num = rabbit_fifo_v0:get_field(next_msg_num, V0State),
+ returns = rabbit_fifo_v0:get_field(returns, V0State),
+ enqueue_count = rabbit_fifo_v0:get_field(enqueue_count, V0State),
+ enqueuers = V1Enqs,
+ ra_indexes = rabbit_fifo_v0:get_field(ra_indexes, V0State),
+ release_cursors = rabbit_fifo_v0:get_field(release_cursors, V0State),
+ consumers = V1Cons,
+ service_queue = V1SQ,
+ prefix_msgs = rabbit_fifo_v0:get_field(prefix_msgs, V0State),
+ msg_bytes_enqueue = rabbit_fifo_v0:get_field(msg_bytes_enqueue, V0State),
+ msg_bytes_checkout = rabbit_fifo_v0:get_field(msg_bytes_checkout, V0State),
+ waiting_consumers = rabbit_fifo_v0:get_field(waiting_consumers, V0State),
+ msg_bytes_in_memory = rabbit_fifo_v0:get_field(msg_bytes_in_memory, V0State),
+ msgs_ready_in_memory = rabbit_fifo_v0:get_field(msgs_ready_in_memory, V0State)
+ }.
+
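+%% treats every process on the purged node as if a 'down' notification had
+%% been received for it, accumulating the resulting state and effects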
+purge_node(Meta, Node, State, Effects) ->
+ lists:foldl(fun(Pid, {S0, E0}) ->
+ {S, E} = handle_down(Meta, Pid, S0),
+ {S, E0 ++ E}
+ end, {State, Effects}, all_pids_for(Node, State)).
+
+%% handles any 'down' notifications that are not noconnection
+handle_down(Meta, Pid, #?MODULE{consumers = Cons0,
+ enqueuers = Enqs0} = State0) ->
+ % Remove any enqueuer for the same pid and enqueue any pending messages
+ % This should be ok as we won't see any more enqueues from this pid
+ State1 = case maps:take(Pid, Enqs0) of
+ {#enqueuer{pending = Pend}, Enqs} ->
+ lists:foldl(fun ({_, RIdx, RawMsg}, S) ->
+ enqueue(RIdx, RawMsg, S)
+ end, State0#?MODULE{enqueuers = Enqs}, Pend);
+ error ->
+ State0
+ end,
+ {Effects1, State2} = handle_waiting_consumer_down(Pid, State1),
+ % return checked out messages to main queue
+ % Find the consumers for the down pid
+ DownConsumers = maps:keys(
+ maps:filter(fun({_, P}, _) -> P =:= Pid end, Cons0)),
+ lists:foldl(fun(ConsumerId, {S, E}) ->
+ cancel_consumer(Meta, ConsumerId, S, E, down)
+ end, {State2, Effects1}, DownConsumers).
+
+consumer_active_flag_update_function(#?MODULE{cfg = #cfg{consumer_strategy = competing}}) ->
+ fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) ->
+ consumer_update_active_effects(State, ConsumerId, Consumer, Active,
+ ActivityStatus, Effects)
+ end;
+consumer_active_flag_update_function(#?MODULE{cfg = #cfg{consumer_strategy = single_active}}) ->
+ fun(_, _, _, _, _, Effects) ->
+ Effects
+ end.
+
+handle_waiting_consumer_down(_Pid,
+ #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State) ->
+ {[], State};
+handle_waiting_consumer_down(_Pid,
+ #?MODULE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = []} = State) ->
+ {[], State};
+handle_waiting_consumer_down(Pid,
+ #?MODULE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = WaitingConsumers0} = State0) ->
+ % get cancel effects for down waiting consumers
+ Down = lists:filter(fun({{_, P}, _}) -> P =:= Pid end,
+ WaitingConsumers0),
+ Effects = lists:foldl(fun ({ConsumerId, _}, Effects) ->
+ cancel_consumer_effects(ConsumerId, State0,
+ Effects)
+ end, [], Down),
+ % update state to have only up waiting consumers
+ StillUp = lists:filter(fun({{_, P}, _}) -> P =/= Pid end,
+ WaitingConsumers0),
+ State = State0#?MODULE{waiting_consumers = StillUp},
+ {Effects, State}.
+
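+%% updates the status of waiting consumers located on the given node;
+%% cancelled waiting consumers are dropped from the list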
+update_waiting_consumer_status(Node,
+ #?MODULE{waiting_consumers = WaitingConsumers},
+ Status) ->
+ [begin
+ case node(Pid) of
+ Node ->
+ {ConsumerId, Consumer#consumer{status = Status}};
+ _ ->
+ {ConsumerId, Consumer}
+ end
+ end || {{_, Pid} = ConsumerId, Consumer} <- WaitingConsumers,
+ Consumer#consumer.status =/= cancelled].
+
+-spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects().
+state_enter(leader, #?MODULE{consumers = Cons,
+ enqueuers = Enqs,
+ waiting_consumers = WaitingConsumers,
+ cfg = #cfg{name = Name,
+ resource = Resource,
+ become_leader_handler = BLH},
+ prefix_msgs = {0, [], 0, []}
+ }) ->
+ % return effects to monitor all current consumers and enqueuers
+ Pids = lists:usort(maps:keys(Enqs)
+ ++ [P || {_, P} <- maps:keys(Cons)]
+ ++ [P || {{_, P}, _} <- WaitingConsumers]),
+ Mons = [{monitor, process, P} || P <- Pids],
+ Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids],
+ NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]),
+ FHReservation = [{mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}],
+ Effects = Mons ++ Nots ++ NodeMons ++ FHReservation,
+ case BLH of
+ undefined ->
+ Effects;
+ {Mod, Fun, Args} ->
+ [{mod_call, Mod, Fun, Args ++ [Name]} | Effects]
+ end;
+state_enter(eol, #?MODULE{enqueuers = Enqs,
+ consumers = Custs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0),
+ WaitingConsumers1 = lists:foldl(fun({{_, P}, V}, Acc) -> Acc#{P => V} end,
+ #{}, WaitingConsumers0),
+ AllConsumers = maps:merge(Custs, WaitingConsumers1),
+ [{send_msg, P, eol, ra_event}
+ || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++
+ [{mod_call, rabbit_quorum_queue, file_handle_release_reservation, []}];
+state_enter(State, #?MODULE{cfg = #cfg{resource = _Resource}}) when State =/= leader ->
+ FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []},
+ [FHReservation];
+state_enter(_, _) ->
+ %% catch all as not handling all states
+ [].
+
+
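+%% periodic housekeeping: triggers deletion of the queue once it has
+%% expired, otherwise emits a handle_tick effect carrying current metrics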
+-spec tick(non_neg_integer(), state()) -> ra_machine:effects().
+tick(Ts, #?MODULE{cfg = #cfg{name = Name,
+ resource = QName},
+ msg_bytes_enqueue = EnqueueBytes,
+ msg_bytes_checkout = CheckoutBytes} = State) ->
+ case is_expired(Ts, State) of
+ true ->
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}];
+ false ->
+ Metrics = {Name,
+ messages_ready(State),
+ num_checked_out(State), % checked out
+ messages_total(State),
+ query_consumer_count(State), % Consumers
+ EnqueueBytes,
+ CheckoutBytes},
+ [{mod_call, rabbit_quorum_queue,
+ handle_tick, [QName, Metrics, all_nodes(State)]}]
+ end.
+
+-spec overview(state()) -> map().
+overview(#?MODULE{consumers = Cons,
+ enqueuers = Enqs,
+ release_cursors = Cursors,
+ enqueue_count = EnqCount,
+ msg_bytes_enqueue = EnqueueBytes,
+ msg_bytes_checkout = CheckoutBytes,
+ cfg = Cfg} = State) ->
+ Conf = #{name => Cfg#cfg.name,
+ resource => Cfg#cfg.resource,
+ release_cursor_interval => Cfg#cfg.release_cursor_interval,
+ dead_lettering_enabled => undefined =/= Cfg#cfg.dead_letter_handler,
+ max_length => Cfg#cfg.max_length,
+ max_bytes => Cfg#cfg.max_bytes,
+ consumer_strategy => Cfg#cfg.consumer_strategy,
+ max_in_memory_length => Cfg#cfg.max_in_memory_length,
+ max_in_memory_bytes => Cfg#cfg.max_in_memory_bytes,
+ expires => Cfg#cfg.expires,
+ delivery_limit => Cfg#cfg.delivery_limit
+ },
+ #{type => ?MODULE,
+ config => Conf,
+ num_consumers => maps:size(Cons),
+ num_checked_out => num_checked_out(State),
+ num_enqueuers => maps:size(Enqs),
+ num_ready_messages => messages_ready(State),
+ num_messages => messages_total(State),
+ num_release_cursors => lqueue:len(Cursors),
+ release_cursors => [I || {_, I, _} <- lqueue:to_list(Cursors)],
+ release_cursor_enqueue_counter => EnqCount,
+ enqueue_message_bytes => EnqueueBytes,
+ checkout_message_bytes => CheckoutBytes}.
+
+-spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) ->
+ [delivery_msg()].
+get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) ->
+ case Consumers of
+ #{Cid := #consumer{checked_out = Checked}} ->
+ [{K, snd(snd(maps:get(K, Checked)))}
+ || K <- lists:seq(From, To),
+ maps:is_key(K, Checked)];
+ _ ->
+ []
+ end.
+
+-spec version() -> pos_integer().
+version() -> 1.
+
+which_module(0) -> rabbit_fifo_v0;
+which_module(1) -> ?MODULE.
+
+-record(aux_gc, {last_raft_idx = 0 :: ra:index()}).
+-record(aux, {name :: atom(),
+ utilisation :: term(),
+ gc = #aux_gc{} :: #aux_gc{}}).
+
+init_aux(Name) when is_atom(Name) ->
+ %% TODO: catch specific exception throw if table already exists
+ ok = ra_machine_ets:create_table(rabbit_fifo_usage,
+ [named_table, set, public,
+ {write_concurrency, true}]),
+ Now = erlang:monotonic_time(micro_seconds),
+ #aux{name = Name,
+ utilisation = {inactive, Now, 1, 1.0}}.
+
+handle_aux(leader, _, garbage_collection, State, Log, _MacState) ->
+ ra_log_wal:force_roll_over(ra_log_wal),
+ {no_reply, State, Log};
+handle_aux(follower, _, garbage_collection, State, Log, MacState) ->
+ ra_log_wal:force_roll_over(ra_log_wal),
+ {no_reply, force_eval_gc(Log, MacState, State), Log};
+handle_aux(_RaState, cast, eval, Aux0, Log, _MacState) ->
+ {no_reply, Aux0, Log};
+handle_aux(_RaState, cast, Cmd, #aux{utilisation = Use0} = Aux0,
+ Log, _MacState)
+ when Cmd == active orelse Cmd == inactive ->
+ {no_reply, Aux0#aux{utilisation = update_use(Use0, Cmd)}, Log};
+handle_aux(_RaState, cast, tick, #aux{name = Name,
+ utilisation = Use0} = State0,
+ Log, MacState) ->
+ true = ets:insert(rabbit_fifo_usage,
+ {Name, utilisation(Use0)}),
+ Aux = eval_gc(Log, MacState, State0),
+ {no_reply, Aux, Log};
+handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0,
+ Log0, MacState) ->
+ case rabbit_fifo:query_peek(Pos, MacState) of
+ {ok, {Idx, {Header, empty}}} ->
+ %% need to re-hydrate from the log
+ {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0),
+ #enqueue{msg = Msg} = Cmd,
+ {reply, {ok, {Header, Msg}}, Aux0, Log};
+ {ok, {_Idx, {Header, Msg}}} ->
+ {reply, {ok, {Header, Msg}}, Aux0, Log0};
+ Err ->
+ {reply, Err, Aux0, Log0}
+ end.
+
+
+eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState,
+ #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) ->
+ {Idx, _} = ra_log:last_index_term(Log),
+ {memory, Mem} = erlang:process_info(self(), memory),
+ case messages_total(MacState) of
+ 0 when Idx > LastGcIdx andalso
+ Mem > ?GC_MEM_LIMIT_B ->
+ garbage_collect(),
+ {memory, MemAfter} = erlang:process_info(self(), memory),
+ rabbit_log:debug("~s: full GC sweep complete. "
+ "Process memory changed from ~.2fMB to ~.2fMB.",
+ [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
+ AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
+ _ ->
+ AuxState
+ end.
+
+force_eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}},
+ #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) ->
+ {Idx, _} = ra_log:last_index_term(Log),
+ {memory, Mem} = erlang:process_info(self(), memory),
+ case Idx > LastGcIdx of
+ true ->
+ garbage_collect(),
+ {memory, MemAfter} = erlang:process_info(self(), memory),
+ rabbit_log:debug("~s: full GC sweep complete. "
+ "Process memory changed from ~.2fMB to ~.2fMB.",
+ [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
+ AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
+ false ->
+ AuxState
+ end.
+
+%%% Queries
+
+query_messages_ready(State) ->
+ messages_ready(State).
+
+query_messages_checked_out(#?MODULE{consumers = Consumers}) ->
+ maps:fold(fun (_, #consumer{checked_out = C}, S) ->
+ maps:size(C) + S
+ end, 0, Consumers).
+
+query_messages_total(State) ->
+ messages_total(State).
+
+query_processes(#?MODULE{enqueuers = Enqs, consumers = Cons0}) ->
+ Cons = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Cons0),
+ maps:keys(maps:merge(Enqs, Cons)).
+
+
+query_ra_indexes(#?MODULE{ra_indexes = RaIndexes}) ->
+ RaIndexes.
+
+query_consumer_count(#?MODULE{consumers = Consumers,
+ waiting_consumers = WaitingConsumers}) ->
+ Up = maps:filter(fun(_ConsumerId, #consumer{status = Status}) ->
+ Status =/= suspected_down
+ end, Consumers),
+ maps:size(Up) + length(WaitingConsumers).
+
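+%% returns a map of consumer id to
+%% {Pid, Tag, Ack, Prefetch, Active, ActivityStatus, Args, Username}
+%% covering both attached and waiting consumers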
+query_consumers(#?MODULE{consumers = Consumers,
+ waiting_consumers = WaitingConsumers,
+ cfg = #cfg{consumer_strategy = ConsumerStrategy}} = State) ->
+ ActiveActivityStatusFun =
+ case ConsumerStrategy of
+ competing ->
+ fun(_ConsumerId,
+ #consumer{status = Status}) ->
+ case Status of
+ suspected_down ->
+ {false, Status};
+ _ ->
+ {true, Status}
+ end
+ end;
+ single_active ->
+ SingleActiveConsumer = query_single_active_consumer(State),
+ fun({Tag, Pid} = _Consumer, _) ->
+ case SingleActiveConsumer of
+ {value, {Tag, Pid}} ->
+ {true, single_active};
+ _ ->
+ {false, waiting}
+ end
+ end
+ end,
+ FromConsumers =
+ maps:fold(fun (_, #consumer{status = cancelled}, Acc) ->
+ Acc;
+ ({Tag, Pid}, #consumer{meta = Meta} = Consumer, Acc) ->
+ {Active, ActivityStatus} =
+ ActiveActivityStatusFun({Tag, Pid}, Consumer),
+ maps:put({Tag, Pid},
+ {Pid, Tag,
+ maps:get(ack, Meta, undefined),
+ maps:get(prefetch, Meta, undefined),
+ Active,
+ ActivityStatus,
+ maps:get(args, Meta, []),
+ maps:get(username, Meta, undefined)},
+ Acc)
+ end, #{}, Consumers),
+ FromWaitingConsumers =
+ lists:foldl(fun ({_, #consumer{status = cancelled}}, Acc) ->
+ Acc;
+ ({{Tag, Pid}, #consumer{meta = Meta} = Consumer}, Acc) ->
+ {Active, ActivityStatus} =
+ ActiveActivityStatusFun({Tag, Pid}, Consumer),
+ maps:put({Tag, Pid},
+ {Pid, Tag,
+ maps:get(ack, Meta, undefined),
+ maps:get(prefetch, Meta, undefined),
+ Active,
+ ActivityStatus,
+ maps:get(args, Meta, []),
+ maps:get(username, Meta, undefined)},
+ Acc)
+ end, #{}, WaitingConsumers),
+ maps:merge(FromConsumers, FromWaitingConsumers).
+
+
+query_single_active_consumer(#?MODULE{cfg = #cfg{consumer_strategy = single_active},
+ consumers = Consumers}) ->
+ case maps:size(Consumers) of
+ 0 ->
+ {error, no_value};
+ 1 ->
+ {value, lists:nth(1, maps:keys(Consumers))};
+ _ ->
+ {error, illegal_size}
+ end;
+query_single_active_consumer(_) ->
+ disabled.
+
+query_stat(#?MODULE{consumers = Consumers} = State) ->
+ {messages_ready(State), maps:size(Consumers)}.
+
+query_in_memory_usage(#?MODULE{msg_bytes_in_memory = Bytes,
+ msgs_ready_in_memory = Length}) ->
+ {Length, Bytes}.
+
+query_peek(Pos, State0) when Pos > 0 ->
+ case take_next_msg(State0) of
+ empty ->
+ {error, no_message_at_pos};
+ {{_Seq, IdxMsg}, _State}
+ when Pos == 1 ->
+ {ok, IdxMsg};
+ {_Msg, State} ->
+ query_peek(Pos-1, State)
+ end.
+
+
+-spec usage(atom()) -> float().
+usage(Name) when is_atom(Name) ->
+ case ets:lookup(rabbit_fifo_usage, Name) of
+ [] -> 0.0;
+ [{_, Use}] -> Use
+ end.
+
+%%% Internal
+
+messages_ready(#?MODULE{messages = M,
+ prefix_msgs = {RCnt, _R, PCnt, _P},
+ returns = R}) ->
+ %% prefix messages will rarely have anything in them during normal
+ %% operations so length/1 is fine here
+ lqueue:len(M) + lqueue:len(R) + RCnt + PCnt.
+
+messages_total(#?MODULE{ra_indexes = I,
+ prefix_msgs = {RCnt, _R, PCnt, _P}}) ->
+ rabbit_fifo_index:size(I) + RCnt + PCnt.
+
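+%% consumer utilisation tracking: the usage state is either
+%% {active, Since, Avg} or {inactive, Since, ActiveTime, Avg} and is folded
+%% into a moving average with the configured half-life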
+update_use({inactive, _, _, _} = CUInfo, inactive) ->
+ CUInfo;
+update_use({active, _, _} = CUInfo, active) ->
+ CUInfo;
+update_use({active, Since, Avg}, inactive) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {inactive, Now, Now - Since, Avg};
+update_use({inactive, Since, Active, Avg}, active) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {active, Now, use_avg(Active, Now - Since, Avg)}.
+
+utilisation({active, Since, Avg}) ->
+ use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg);
+utilisation({inactive, Since, Active, Avg}) ->
+ use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg).
+
+use_avg(0, 0, Avg) ->
+ Avg;
+use_avg(Active, Inactive, Avg) ->
+ Time = Inactive + Active,
+ moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg).
+
+moving_average(_Time, _, Next, undefined) ->
+ Next;
+moving_average(Time, HalfLife, Next, Current) ->
+ Weight = math:exp(Time * math:log(0.5) / HalfLife),
+ Next * (1 - Weight) + Current * Weight.
+
+num_checked_out(#?MODULE{consumers = Cons}) ->
+ maps:fold(fun (_, #consumer{checked_out = C}, Acc) ->
+ maps:size(C) + Acc
+ end, 0, Cons).
+
+cancel_consumer(Meta, ConsumerId,
+ #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State,
+ Effects, Reason) ->
+ cancel_consumer0(Meta, ConsumerId, State, Effects, Reason);
+cancel_consumer(Meta, ConsumerId,
+ #?MODULE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = []} = State,
+ Effects, Reason) ->
+ %% single active consumer on, no consumers are waiting
+ cancel_consumer0(Meta, ConsumerId, State, Effects, Reason);
+cancel_consumer(Meta, ConsumerId,
+ #?MODULE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = Waiting0} = State0,
+ Effects0, Reason) ->
+ %% single active consumer on, consumers are waiting
+ case maps:is_key(ConsumerId, Cons0) of
+ true ->
+ % The active consumer is to be removed
+ {State1, Effects1} = cancel_consumer0(Meta, ConsumerId, State0,
+ Effects0, Reason),
+ activate_next_consumer(State1, Effects1);
+ false ->
+ % The consumer being cancelled is not the active one;
+ % just remove it from the waiting consumers
+ Waiting = lists:keydelete(ConsumerId, 1, Waiting0),
+ Effects = cancel_consumer_effects(ConsumerId, State0, Effects0),
+ % A waiting consumer isn't supposed to have any checked out messages,
+ % so nothing special to do here
+ {State0#?MODULE{waiting_consumers = Waiting}, Effects}
+ end.
+
+consumer_update_active_effects(#?MODULE{cfg = #cfg{resource = QName}},
+ ConsumerId, #consumer{meta = Meta},
+ Active, ActivityStatus,
+ Effects) ->
+ Ack = maps:get(ack, Meta, undefined),
+ Prefetch = maps:get(prefetch, Meta, undefined),
+ Args = maps:get(args, Meta, []),
+ [{mod_call, rabbit_quorum_queue, update_consumer_handler,
+ [QName, ConsumerId, false, Ack, Prefetch, Active, ActivityStatus, Args]}
+ | Effects].
+
+cancel_consumer0(Meta, ConsumerId,
+ #?MODULE{consumers = C0} = S0, Effects0, Reason) ->
+ case C0 of
+ #{ConsumerId := Consumer} ->
+ {S, Effects2} = maybe_return_all(Meta, ConsumerId, Consumer,
+ S0, Effects0, Reason),
+ %% The effects are emitted before the consumer is actually removed
+ %% if the consumer has unacked messages. This is a bit weird but
+ %% in line with what classic queues do (from an external point of
+ %% view)
+ Effects = cancel_consumer_effects(ConsumerId, S, Effects2),
+ case maps:size(S#?MODULE.consumers) of
+ 0 ->
+ {S, [{aux, inactive} | Effects]};
+ _ ->
+ {S, Effects}
+ end;
+ _ ->
+ %% already removed: do nothing
+ {S0, Effects0}
+ end.
+
+activate_next_consumer(#?MODULE{consumers = Cons,
+ waiting_consumers = Waiting0} = State0,
+ Effects0) ->
+ case maps:filter(fun (_, #consumer{status = S}) -> S == up end, Cons) of
+ Up when map_size(Up) == 0 ->
+ %% there are no active consumers in the consumers map
+ case lists:filter(fun ({_, #consumer{status = Status}}) ->
+ Status == up
+ end, Waiting0) of
+ [{NextConsumerId, NextConsumer} | _] ->
+ %% there is a potential next active consumer
+ Remaining = lists:keydelete(NextConsumerId, 1, Waiting0),
+ #?MODULE{service_queue = ServiceQueue} = State0,
+ ServiceQueue1 = maybe_queue_consumer(NextConsumerId,
+ NextConsumer,
+ ServiceQueue),
+ State = State0#?MODULE{consumers = Cons#{NextConsumerId => NextConsumer},
+ service_queue = ServiceQueue1,
+ waiting_consumers = Remaining},
+ Effects = consumer_update_active_effects(State, NextConsumerId,
+ NextConsumer, true,
+ single_active, Effects0),
+ {State, Effects};
+ [] ->
+ {State0, [{aux, inactive} | Effects0]}
+ end;
+ _ ->
+ {State0, Effects0}
+ end.
+
+
+
+maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, Consumer, S0, Effects0, Reason) ->
+ case Reason of
+ consumer_cancel ->
+ {update_or_remove_sub(Meta, ConsumerId,
+ Consumer#consumer{lifetime = once,
+ credit = 0,
+ status = cancelled},
+ S0), Effects0};
+ down ->
+ {S1, Effects1} = return_all(Meta, S0, Effects0, ConsumerId, Consumer),
+ {S1#?MODULE{consumers = maps:remove(ConsumerId, S1#?MODULE.consumers),
+ last_active = Ts},
+ Effects1}
+ end.
+
+apply_enqueue(#{index := RaftIdx} = Meta, From, Seq, RawMsg, State0) ->
+ case maybe_enqueue(RaftIdx, From, Seq, RawMsg, [], State0) of
+ {ok, State1, Effects1} ->
+ State2 = append_to_master_index(RaftIdx, State1),
+ {State, ok, Effects} = checkout(Meta, State0, State2, Effects1),
+ {maybe_store_dehydrated_state(RaftIdx, State), ok, Effects};
+ {duplicate, State, Effects} ->
+ {State, ok, Effects}
+ end.
+
+drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) ->
+ case take_next_msg(State0) of
+ {FullMsg = {_MsgId, {RaftIdxToDrop, {Header, Msg}}},
+ State1} ->
+ Indexes = rabbit_fifo_index:delete(RaftIdxToDrop, Indexes0),
+ State2 = add_bytes_drop(Header, State1#?MODULE{ra_indexes = Indexes}),
+ State = case Msg of
+ 'empty' -> State2;
+ _ -> subtract_in_memory_counts(Header, State2)
+ end,
+ Effects = dead_letter_effects(maxlen, #{none => FullMsg},
+ State, Effects0),
+ {State, Effects};
+ {{'$prefix_msg', Header}, State1} ->
+ State2 = subtract_in_memory_counts(Header, add_bytes_drop(Header, State1)),
+ {State2, Effects0};
+ {{'$empty_msg', Header}, State1} ->
+ State2 = add_bytes_drop(Header, State1),
+ {State2, Effects0};
+ empty ->
+ {State0, Effects0}
+ end.
+
+enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages,
+ next_msg_num = NextMsgNum} = State0) ->
+ %% the initial header is an integer only - it will get expanded to a map
+ %% when the next required key is added
+ Header = message_size(RawMsg),
+ {State1, Msg} =
+ case evaluate_memory_limit(Header, State0) of
+ true ->
+ % indexed message with header map
+ {State0, {RaftIdx, {Header, 'empty'}}};
+ false ->
+ {add_in_memory_counts(Header, State0),
+ {RaftIdx, {Header, RawMsg}}} % indexed message with header map
+ end,
+ State = add_bytes_enqueue(Header, State1),
+ State#?MODULE{messages = lqueue:in({NextMsgNum, Msg}, Messages),
+ next_msg_num = NextMsgNum + 1}.
+
+append_to_master_index(RaftIdx,
+ #?MODULE{ra_indexes = Indexes0} = State0) ->
+ State = incr_enqueue_count(State0),
+ Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0),
+ State#?MODULE{ra_indexes = Indexes}.
+
+
+incr_enqueue_count(#?MODULE{enqueue_count = EC,
+ cfg = #cfg{release_cursor_interval = {_Base, C}}
+ } = State0) when EC >= C ->
+ %% this will trigger a dehydrated version of the state to be stored
+ %% at this raft index for potential future snapshot generation
+ %% Q: Why don't we just stash the release cursor here?
+ %% A: Because it needs to be the very last thing we do and we
+ %% first need to run the checkout logic.
+ State0#?MODULE{enqueue_count = 0};
+incr_enqueue_count(#?MODULE{enqueue_count = C} = State) ->
+ State#?MODULE{enqueue_count = C + 1}.
+
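+%% once the enqueue counter has been reset, stash a release cursor holding
+%% a dehydrated copy of the state so that a snapshot can later be taken at
+%% this raft index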
+maybe_store_dehydrated_state(RaftIdx,
+ #?MODULE{cfg =
+ #cfg{release_cursor_interval = {Base, _}}
+ = Cfg,
+ ra_indexes = Indexes,
+ enqueue_count = 0,
+ release_cursors = Cursors0} = State0) ->
+ case rabbit_fifo_index:exists(RaftIdx, Indexes) of
+ false ->
+ %% the incoming enqueue must already have been dropped
+ State0;
+ true ->
+ Interval = case Base of
+ 0 -> 0;
+ _ ->
+ Total = messages_total(State0),
+ min(max(Total, Base), ?RELEASE_CURSOR_EVERY_MAX)
+ end,
+ State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval =
+ {Base, Interval}}},
+ Dehydrated = dehydrate_state(State),
+ Cursor = {release_cursor, RaftIdx, Dehydrated},
+ Cursors = lqueue:in(Cursor, Cursors0),
+ State#?MODULE{release_cursors = Cursors}
+ end;
+maybe_store_dehydrated_state(_RaftIdx, State) ->
+ State.
+
+enqueue_pending(From,
+ #enqueuer{next_seqno = Next,
+ pending = [{Next, RaftIdx, RawMsg} | Pending]} = Enq0,
+ State0) ->
+ State = enqueue(RaftIdx, RawMsg, State0),
+ Enq = Enq0#enqueuer{next_seqno = Next + 1, pending = Pending},
+ enqueue_pending(From, Enq, State);
+enqueue_pending(From, Enq, #?MODULE{enqueuers = Enqueuers0} = State) ->
+ State#?MODULE{enqueuers = Enqueuers0#{From => Enq}}.
+
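+%% enqueue with per-enqueuer deduplication: the expected sequence number is
+%% enqueued immediately (along with any now-contiguous pending messages),
+%% higher sequence numbers are buffered as pending and anything at or below
+%% the expected number is treated as a duplicate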
+maybe_enqueue(RaftIdx, undefined, undefined, RawMsg, Effects, State0) ->
+ % direct enqueue without tracking
+ State = enqueue(RaftIdx, RawMsg, State0),
+ {ok, State, Effects};
+maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0,
+ #?MODULE{enqueuers = Enqueuers0} = State0) ->
+ case maps:get(From, Enqueuers0, undefined) of
+ undefined ->
+ State1 = State0#?MODULE{enqueuers = Enqueuers0#{From => #enqueuer{}}},
+ {ok, State, Effects} = maybe_enqueue(RaftIdx, From, MsgSeqNo,
+ RawMsg, Effects0, State1),
+ {ok, State, [{monitor, process, From} | Effects]};
+ #enqueuer{next_seqno = MsgSeqNo} = Enq0 ->
+ % it is the next expected seqno
+ State1 = enqueue(RaftIdx, RawMsg, State0),
+ Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1},
+ State = enqueue_pending(From, Enq, State1),
+ {ok, State, Effects0};
+ #enqueuer{next_seqno = Next,
+ pending = Pending0} = Enq0
+ when MsgSeqNo > Next ->
+ % out of order delivery
+ Pending = [{MsgSeqNo, RaftIdx, RawMsg} | Pending0],
+ Enq = Enq0#enqueuer{pending = lists:sort(Pending)},
+ {ok, State0#?MODULE{enqueuers = Enqueuers0#{From => Enq}}, Effects0};
+ #enqueuer{next_seqno = Next} when MsgSeqNo =< Next ->
+ % duplicate delivery - remove the raft index from the ra_indexes
+ % map as it was added earlier
+ {duplicate, State0, Effects0}
+ end.
+
+snd(T) ->
+ element(2, T).
+
+return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned,
+ Effects0, State0) ->
+ {State1, Effects1} = maps:fold(
+ fun(MsgId, {Tag, _} = Msg, {S0, E0})
+ when Tag == '$prefix_msg';
+ Tag == '$empty_msg' ->
+ return_one(Meta, MsgId, 0, Msg, S0, E0, ConsumerId);
+ (MsgId, {MsgNum, Msg}, {S0, E0}) ->
+ return_one(Meta, MsgId, MsgNum, Msg, S0, E0,
+ ConsumerId)
+ end, {State0, Effects0}, Returned),
+ State2 =
+ case State1#?MODULE.consumers of
+ #{ConsumerId := Con0} ->
+ Con = Con0#consumer{credit = increase_credit(Con0,
+ map_size(Returned))},
+ update_or_remove_sub(Meta, ConsumerId, Con, State1);
+ _ ->
+ State1
+ end,
+ {State, ok, Effects} = checkout(Meta, State0, State2, Effects1),
+ update_smallest_raft_index(IncomingRaftIdx, State, Effects).
+
+% used to process messages that are finished
+complete(Meta, ConsumerId, Discarded,
+ #consumer{checked_out = Checked} = Con0, Effects,
+ #?MODULE{ra_indexes = Indexes0} = State0) ->
+ %% TODO optimise use of Discarded map here
+ MsgRaftIdxs = [RIdx || {_, {RIdx, _}} <- maps:values(Discarded)],
+ %% credit_mode = simple_prefetch should automatically top-up credit
+ %% as messages are settled or otherwise returned
+ Con = Con0#consumer{checked_out = maps:without(maps:keys(Discarded), Checked),
+ credit = increase_credit(Con0, map_size(Discarded))},
+ State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0),
+ Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0,
+ MsgRaftIdxs),
+ %% TODO: use maps:fold instead
+ State2 = lists:foldl(fun({_, {_, {Header, _}}}, Acc) ->
+ add_bytes_settle(Header, Acc);
+ ({'$prefix_msg', Header}, Acc) ->
+ add_bytes_settle(Header, Acc);
+ ({'$empty_msg', Header}, Acc) ->
+ add_bytes_settle(Header, Acc)
+ end, State1, maps:values(Discarded)),
+ {State2#?MODULE{ra_indexes = Indexes}, Effects}.
+
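+%% 'once' consumers and manually credited consumers never have their credit
+%% topped up automatically; all other consumers get the credit added back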
+increase_credit(#consumer{lifetime = once,
+ credit = Credit}, _) ->
+ %% once consumers cannot increment credit
+ Credit;
+increase_credit(#consumer{lifetime = auto,
+ credit_mode = credited,
+ credit = Credit}, _) ->
+ %% credit_mode: credit also doesn't automatically increment credit
+ Credit;
+increase_credit(#consumer{credit = Current}, Credit) ->
+ Current + Credit.
+
+complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId,
+ #consumer{checked_out = Checked0} = Con0,
+ Effects0, State0) ->
+ Discarded = maps:with(MsgIds, Checked0),
+ {State2, Effects1} = complete(Meta, ConsumerId, Discarded, Con0,
+ Effects0, State0),
+ {State, ok, Effects} = checkout(Meta, State0, State2, Effects1),
+ update_smallest_raft_index(IncomingRaftIdx, State, Effects).
+
+dead_letter_effects(_Reason, _Discarded,
+ #?MODULE{cfg = #cfg{dead_letter_handler = undefined}},
+ Effects) ->
+ Effects;
+dead_letter_effects(Reason, Discarded,
+ #?MODULE{cfg = #cfg{dead_letter_handler = {Mod, Fun, Args}}},
+ Effects) ->
+ RaftIdxs = maps:fold(
+ fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) ->
+ [RaftIdx | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Discarded),
+ [{log, RaftIdxs,
+ fun (Log) ->
+ Lookup = maps:from_list(lists:zip(RaftIdxs, Log)),
+ DeadLetters = maps:fold(
+ fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) ->
+ {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup),
+ [{Reason, Msg} | Acc];
+ (_, {_, {_, {_Header, Msg}}}, Acc) ->
+ [{Reason, Msg} | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Discarded),
+ [{mod_call, Mod, Fun, Args ++ [DeadLetters]}]
+ end} | Effects].
+
+cancel_consumer_effects(ConsumerId,
+ #?MODULE{cfg = #cfg{resource = QName}}, Effects) ->
+ [{mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [QName, ConsumerId]} | Effects].
+
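+%% works out whether a release cursor effect can be emitted: when no raft
+%% indexes remain the cursor can advance all the way to the incoming index,
+%% otherwise only a stashed cursor below the smallest live index is released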
+update_smallest_raft_index(Idx, State, Effects) ->
+ update_smallest_raft_index(Idx, ok, State, Effects).
+
+update_smallest_raft_index(IncomingRaftIdx, Reply,
+ #?MODULE{cfg = Cfg,
+ ra_indexes = Indexes,
+ release_cursors = Cursors0} = State0,
+ Effects) ->
+ case rabbit_fifo_index:size(Indexes) of
+ 0 ->
+ % there are no messages on queue anymore and no pending enqueues
+ % we can forward release_cursor all the way until
+ % the last received command, hooray
+ %% reset the release cursor interval
+ #cfg{release_cursor_interval = {Base, _}} = Cfg,
+ RCI = {Base, Base},
+ State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCI},
+ release_cursors = lqueue:new(),
+ enqueue_count = 0},
+ {State, Reply, Effects ++ [{release_cursor, IncomingRaftIdx, State}]};
+ _ ->
+ Smallest = rabbit_fifo_index:smallest(Indexes),
+ case find_next_cursor(Smallest, Cursors0) of
+ {empty, Cursors} ->
+ {State0#?MODULE{release_cursors = Cursors}, Reply, Effects};
+ {Cursor, Cursors} ->
+ %% we can emit a release cursor when we've passed the smallest
+ %% release cursor available.
+ {State0#?MODULE{release_cursors = Cursors}, Reply,
+ Effects ++ [Cursor]}
+ end
+ end.
+
+find_next_cursor(Idx, Cursors) ->
+ find_next_cursor(Idx, Cursors, empty).
+
+find_next_cursor(Smallest, Cursors0, Potential) ->
+ case lqueue:out(Cursors0) of
+ {{value, {_, Idx, _} = Cursor}, Cursors} when Idx < Smallest ->
+ %% we found one but it may not be the largest one
+ find_next_cursor(Smallest, Cursors, Cursor);
+ _ ->
+ {Potential, Cursors0}
+ end.
+
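+%% headers start out as a plain integer (the message size) and are expanded
+%% into a map the first time an additional key has to be stored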
+update_header(Key, UpdateFun, Default, Header)
+ when is_integer(Header) ->
+ update_header(Key, UpdateFun, Default, #{size => Header});
+update_header(Key, UpdateFun, Default, Header) ->
+ maps:update_with(Key, UpdateFun, Default, Header).
+
+
+return_one(Meta, MsgId, 0, {Tag, Header0},
+ #?MODULE{returns = Returns,
+ consumers = Consumers,
+ cfg = #cfg{delivery_limit = DeliveryLimit}} = State0,
+ Effects0, ConsumerId)
+ when Tag == '$prefix_msg'; Tag == '$empty_msg' ->
+ #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers),
+ Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0),
+ Msg0 = {Tag, Header},
+ case maps:get(delivery_count, Header) of
+ DeliveryCount when DeliveryCount > DeliveryLimit ->
+ complete(Meta, ConsumerId, #{MsgId => Msg0}, Con0, Effects0, State0);
+ _ ->
+ %% this should not affect the release cursor in any way
+ Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)},
+ {Msg, State1} = case Tag of
+ '$empty_msg' ->
+ {Msg0, State0};
+ _ -> case evaluate_memory_limit(Header, State0) of
+ true ->
+ {{'$empty_msg', Header}, State0};
+ false ->
+ {Msg0, add_in_memory_counts(Header, State0)}
+ end
+ end,
+ {add_bytes_return(
+ Header,
+ State1#?MODULE{consumers = Consumers#{ConsumerId => Con},
+ returns = lqueue:in(Msg, Returns)}),
+ Effects0}
+ end;
+return_one(Meta, MsgId, MsgNum, {RaftId, {Header0, RawMsg}},
+ #?MODULE{returns = Returns,
+ consumers = Consumers,
+ cfg = #cfg{delivery_limit = DeliveryLimit}} = State0,
+ Effects0, ConsumerId) ->
+ #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers),
+ Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0),
+ Msg0 = {RaftId, {Header, RawMsg}},
+ case maps:get(delivery_count, Header) of
+ DeliveryCount when DeliveryCount > DeliveryLimit ->
+ DlMsg = {MsgNum, Msg0},
+ Effects = dead_letter_effects(delivery_limit, #{none => DlMsg},
+ State0, Effects0),
+ complete(Meta, ConsumerId, #{MsgId => DlMsg}, Con0, Effects, State0);
+ _ ->
+ Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)},
+ %% this should not affect the release cursor in any way
+ {Msg, State1} = case RawMsg of
+ 'empty' ->
+ {Msg0, State0};
+ _ ->
+ case evaluate_memory_limit(Header, State0) of
+ true ->
+ {{RaftId, {Header, 'empty'}}, State0};
+ false ->
+ {Msg0, add_in_memory_counts(Header, State0)}
+ end
+ end,
+ {add_bytes_return(
+ Header,
+ State1#?MODULE{consumers = Consumers#{ConsumerId => Con},
+ returns = lqueue:in({MsgNum, Msg}, Returns)}),
+ Effects0}
+ end.
+
+return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId,
+ #consumer{checked_out = Checked0} = Con) ->
+ %% need to sort the list so that we return messages in the order
+ %% they were checked out
+ Checked = lists:sort(maps:to_list(Checked0)),
+ State = State0#?MODULE{consumers = Cons#{ConsumerId => Con}},
+ lists:foldl(fun ({MsgId, {'$prefix_msg', _} = Msg}, {S, E}) ->
+ return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId);
+ ({MsgId, {'$empty_msg', _} = Msg}, {S, E}) ->
+ return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId);
+ ({MsgId, {MsgNum, Msg}}, {S, E}) ->
+ return_one(Meta, MsgId, MsgNum, Msg, S, E, ConsumerId)
+ end, {State, Effects0}, Checked).
+
+%% checkout new messages to consumers
+checkout(#{index := Index} = Meta, OldState, State0, Effects0) ->
+ {State1, _Result, Effects1} = checkout0(Meta, checkout_one(Meta, State0),
+ Effects0, {#{}, #{}}),
+ case evaluate_limit(Index, false, OldState, State1, Effects1) of
+ {State, true, Effects} ->
+ update_smallest_raft_index(Index, State, Effects);
+ {State, false, Effects} ->
+ {State, ok, Effects}
+ end.
+
+checkout0(Meta, {success, ConsumerId, MsgId, {RaftIdx, {Header, 'empty'}}, State},
+ Effects, {SendAcc, LogAcc0}) ->
+ DelMsg = {RaftIdx, {MsgId, Header}},
+ LogAcc = maps:update_with(ConsumerId,
+ fun (M) -> [DelMsg | M] end,
+ [DelMsg], LogAcc0),
+ checkout0(Meta, checkout_one(Meta, State), Effects, {SendAcc, LogAcc});
+checkout0(Meta, {success, ConsumerId, MsgId, Msg, State}, Effects,
+ {SendAcc0, LogAcc}) ->
+ DelMsg = {MsgId, Msg},
+ SendAcc = maps:update_with(ConsumerId,
+ fun (M) -> [DelMsg | M] end,
+ [DelMsg], SendAcc0),
+ checkout0(Meta, checkout_one(Meta, State), Effects, {SendAcc, LogAcc});
+checkout0(_Meta, {Activity, State0}, Effects0, {SendAcc, LogAcc}) ->
+ Effects1 = case Activity of
+ nochange ->
+ append_send_msg_effects(
+ append_log_effects(Effects0, LogAcc), SendAcc);
+ inactive ->
+ [{aux, inactive}
+ | append_send_msg_effects(
+ append_log_effects(Effects0, LogAcc), SendAcc)]
+ end,
+ {State0, ok, lists:reverse(Effects1)}.
+
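+%% enforces max_length/max_bytes: with drop_head the head of the queue is
+%% dropped until the queue is back within its limits, with reject_publish
+%% enqueuers are told to block and are unblocked again once the queue falls
+%% below the soft limit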
+evaluate_limit(_Index, Result, _BeforeState,
+ #?MODULE{cfg = #cfg{max_length = undefined,
+ max_bytes = undefined}} = State,
+ Effects) ->
+ {State, Result, Effects};
+evaluate_limit(Index, Result, BeforeState,
+ #?MODULE{cfg = #cfg{overflow_strategy = Strategy},
+ enqueuers = Enqs0} = State0,
+ Effects0) ->
+ case is_over_limit(State0) of
+ true when Strategy == drop_head ->
+ {State, Effects} = drop_head(State0, Effects0),
+ evaluate_limit(Index, true, BeforeState, State, Effects);
+ true when Strategy == reject_publish ->
+ %% generate send_msg effect for each enqueuer to let them know
+ %% they need to block
+ {Enqs, Effects} =
+ maps:fold(
+ fun (P, #enqueuer{blocked = undefined} = E0, {Enqs, Acc}) ->
+ E = E0#enqueuer{blocked = Index},
+ {Enqs#{P => E},
+ [{send_msg, P, {queue_status, reject_publish},
+ [ra_event]} | Acc]};
+ (_P, _E, Acc) ->
+ Acc
+ end, {Enqs0, Effects0}, Enqs0),
+ {State0#?MODULE{enqueuers = Enqs}, Result, Effects};
+ false when Strategy == reject_publish ->
+ %% TODO: optimise as this case gets called for every command
+ %% pretty much
+ Before = is_below_soft_limit(BeforeState),
+ case {Before, is_below_soft_limit(State0)} of
+ {false, true} ->
+ %% we have moved below the lower limit, so unblock the enqueuers
+ {Enqs, Effects} =
+ maps:fold(
+ fun (P, #enqueuer{} = E0, {Enqs, Acc}) ->
+ E = E0#enqueuer{blocked = undefined},
+ {Enqs#{P => E},
+ [{send_msg, P, {queue_status, go}, [ra_event]}
+ | Acc]};
+ (_P, _E, Acc) ->
+ Acc
+ end, {Enqs0, Effects0}, Enqs0),
+ {State0#?MODULE{enqueuers = Enqs}, Result, Effects};
+ _ ->
+ {State0, Result, Effects0}
+ end;
+ false ->
+ {State0, Result, Effects0}
+ end.
+
+evaluate_memory_limit(_Header,
+ #?MODULE{cfg = #cfg{max_in_memory_length = undefined,
+ max_in_memory_bytes = undefined}}) ->
+ false;
+evaluate_memory_limit(#{size := Size}, State) ->
+ evaluate_memory_limit(Size, State);
+evaluate_memory_limit(Size,
+ #?MODULE{cfg = #cfg{max_in_memory_length = MaxLength,
+ max_in_memory_bytes = MaxBytes},
+ msg_bytes_in_memory = Bytes,
+ msgs_ready_in_memory = Length})
+ when is_integer(Size) ->
+ (Length >= MaxLength) orelse ((Bytes + Size) > MaxBytes).
+
+append_send_msg_effects(Effects, AccMap) when map_size(AccMap) == 0 ->
+ Effects;
+append_send_msg_effects(Effects0, AccMap) ->
+ Effects = maps:fold(fun (C, Msgs, Ef) ->
+ [send_msg_effect(C, lists:reverse(Msgs)) | Ef]
+ end, Effects0, AccMap),
+ [{aux, active} | Effects].
+
+append_log_effects(Effects0, AccMap) ->
+ maps:fold(fun (C, Msgs, Ef) ->
+ [send_log_effect(C, lists:reverse(Msgs)) | Ef]
+ end, Effects0, AccMap).
+
+%% The next message is determined as follows:
+%% First we check if there are prefix returns
+%% Then we check if there are current returns
+%% Then we check prefix msgs
+%% Then we check current messages
+%%
+%% When a message is returned it always goes onto the current return queue,
+%% for both prefix messages and current messages
+take_next_msg(#?MODULE{prefix_msgs = {R, P}} = State) ->
+ %% conversion
+ take_next_msg(State#?MODULE{prefix_msgs = {length(R), R, length(P), P}});
+take_next_msg(#?MODULE{prefix_msgs = {NumR, [{'$empty_msg', _} = Msg | Rem],
+ NumP, P}} = State) ->
+ %% there are prefix returns, these should be served first
+ {Msg, State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}};
+take_next_msg(#?MODULE{prefix_msgs = {NumR, [Header | Rem], NumP, P}} = State) ->
+ %% there are prefix returns, these should be served first
+ {{'$prefix_msg', Header},
+ State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}};
+take_next_msg(#?MODULE{returns = Returns,
+ messages = Messages0,
+ prefix_msgs = {NumR, R, NumP, P}} = State) ->
+ %% use peek rather than out there as the most likely case is an empty
+ %% queue
+ case lqueue:peek(Returns) of
+ {value, NextMsg} ->
+ {NextMsg,
+ State#?MODULE{returns = lqueue:drop(Returns)}};
+ empty when P == [] ->
+ case lqueue:out(Messages0) of
+ {empty, _} ->
+ empty;
+ {{value, {_, _} = SeqMsg}, Messages} ->
+ {SeqMsg, State#?MODULE{messages = Messages}}
+ end;
+ empty ->
+ [Msg | Rem] = P,
+ case Msg of
+ {Header, 'empty'} ->
+ %% There are prefix msgs
+ {{'$empty_msg', Header},
+ State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}};
+ Header ->
+ {{'$prefix_msg', Header},
+ State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}}
+ end
+ end.
+
+send_msg_effect({CTag, CPid}, Msgs) ->
+ {send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}.
+
+send_log_effect({CTag, CPid}, IdxMsgs) ->
+ {RaftIdxs, Data} = lists:unzip(IdxMsgs),
+ {log, RaftIdxs,
+ fun(Log) ->
+ Msgs = lists:zipwith(fun ({enqueue, _, _, Msg}, {MsgId, Header}) ->
+ {MsgId, {Header, Msg}}
+ end, Log, Data),
+ [{send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}]
+ end,
+ {local, node(CPid)}}.
+
+reply_log_effect(RaftIdx, MsgId, Header, Ready, From) ->
+ {log, [RaftIdx],
+ fun([{enqueue, _, _, Msg}]) ->
+ [{reply, From, {wrap_reply,
+ {dequeue, {MsgId, {Header, Msg}}, Ready}}}]
+ end}.
+
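+%% attempts to check a single message out to the consumer at the head of
+%% the service queue, skipping consumers that have no credit, have been
+%% cancelled or are suspected down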
+checkout_one(Meta, #?MODULE{service_queue = SQ0,
+ messages = Messages0,
+ consumers = Cons0} = InitState) ->
+ case priority_queue:out(SQ0) of
+ {{value, ConsumerId}, SQ1} ->
+ case take_next_msg(InitState) of
+ {ConsumerMsg, State0} ->
+ %% there are consumers waiting to be serviced
+ %% process consumer checkout
+ case maps:find(ConsumerId, Cons0) of
+ {ok, #consumer{credit = 0}} ->
+ %% no credit but was still on queue
+ %% can happen when draining
+ %% recurse without consumer on queue
+ checkout_one(Meta, InitState#?MODULE{service_queue = SQ1});
+ {ok, #consumer{status = cancelled}} ->
+ checkout_one(Meta, InitState#?MODULE{service_queue = SQ1});
+ {ok, #consumer{status = suspected_down}} ->
+ checkout_one(Meta, InitState#?MODULE{service_queue = SQ1});
+ {ok, #consumer{checked_out = Checked0,
+ next_msg_id = Next,
+ credit = Credit,
+ delivery_count = DelCnt} = Con0} ->
+ Checked = maps:put(Next, ConsumerMsg, Checked0),
+ Con = Con0#consumer{checked_out = Checked,
+ next_msg_id = Next + 1,
+ credit = Credit - 1,
+ delivery_count = DelCnt + 1},
+ State1 = update_or_remove_sub(Meta,
+ ConsumerId, Con,
+ State0#?MODULE{service_queue = SQ1}),
+ {State, Msg} =
+ case ConsumerMsg of
+ {'$prefix_msg', Header} ->
+ {subtract_in_memory_counts(
+ Header, add_bytes_checkout(Header, State1)),
+ ConsumerMsg};
+ {'$empty_msg', Header} ->
+ {add_bytes_checkout(Header, State1),
+ ConsumerMsg};
+ {_, {_, {Header, 'empty'}} = M} ->
+ {add_bytes_checkout(Header, State1),
+ M};
+ {_, {_, {Header, _} = M}} ->
+ {subtract_in_memory_counts(
+ Header,
+ add_bytes_checkout(Header, State1)),
+ M}
+ end,
+ {success, ConsumerId, Next, Msg, State};
+ error ->
+ %% consumer did not exist but was queued, recurse
+ checkout_one(Meta, InitState#?MODULE{service_queue = SQ1})
+ end;
+ empty ->
+ {nochange, InitState}
+ end;
+ {empty, _} ->
+ case lqueue:len(Messages0) of
+ 0 -> {nochange, InitState};
+ _ -> {inactive, InitState}
+ end
+ end.
+
+update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto,
+ credit = 0} = Con,
+ #?MODULE{consumers = Cons} = State) ->
+ State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)};
+update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto} = Con,
+ #?MODULE{consumers = Cons,
+ service_queue = ServiceQueue} = State) ->
+ State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons),
+ service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)};
+update_or_remove_sub(#{system_time := Ts},
+ ConsumerId, #consumer{lifetime = once,
+ checked_out = Checked,
+ credit = 0} = Con,
+ #?MODULE{consumers = Cons} = State) ->
+ case maps:size(Checked) of
+ 0 ->
+ % we're done with this consumer
+ State#?MODULE{consumers = maps:remove(ConsumerId, Cons),
+ last_active = Ts};
+ _ ->
+ % there are unsettled items so need to keep around
+ State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)}
+ end;
+update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = once} = Con,
+ #?MODULE{consumers = Cons,
+ service_queue = ServiceQueue} = State) ->
+ State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons),
+ service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}.
+
+uniq_queue_in(Key, #consumer{priority = P}, Queue) ->
+ % TODO: priority_queue:member/2 could be quite expensive, however the practical
+ % number of unique consumers may not be large enough for it to matter
+ case priority_queue:member(Key, Queue) of
+ true ->
+ Queue;
+ false ->
+ priority_queue:in(Key, P, Queue)
+ end.
+
+update_consumer(ConsumerId, Meta, Spec, Priority,
+ #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State0) ->
+ %% general case, single active consumer off
+ update_consumer0(ConsumerId, Meta, Spec, Priority, State0);
+update_consumer(ConsumerId, Meta, Spec, Priority,
+ #?MODULE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active}} = State0)
+ when map_size(Cons0) == 0 ->
+ %% single active consumer on, no one is consuming yet
+ update_consumer0(ConsumerId, Meta, Spec, Priority, State0);
+update_consumer(ConsumerId, Meta, {Life, Credit, Mode}, Priority,
+ #?MODULE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = WaitingConsumers0} = State0) ->
+ %% single active consumer on and one active consumer already
+ %% adding the new consumer to the waiting list
+ Consumer = #consumer{lifetime = Life, meta = Meta,
+ priority = Priority,
+ credit = Credit, credit_mode = Mode},
+ WaitingConsumers1 = WaitingConsumers0 ++ [{ConsumerId, Consumer}],
+ State0#?MODULE{waiting_consumers = WaitingConsumers1}.
+
+update_consumer0(ConsumerId, Meta, {Life, Credit, Mode}, Priority,
+ #?MODULE{consumers = Cons0,
+ service_queue = ServiceQueue0} = State0) ->
+ %% TODO: this logic may not be correct for updating a pre-existing consumer
+ Init = #consumer{lifetime = Life, meta = Meta,
+ priority = Priority,
+ credit = Credit, credit_mode = Mode},
+ Cons = maps:update_with(ConsumerId,
+ fun(S) ->
+ %% remove any in-flight messages from
+ %% the credit update
+ N = maps:size(S#consumer.checked_out),
+ C = max(0, Credit - N),
+ S#consumer{lifetime = Life, credit = C}
+ end, Init, Cons0),
+ ServiceQueue = maybe_queue_consumer(ConsumerId, maps:get(ConsumerId, Cons),
+ ServiceQueue0),
+ State0#?MODULE{consumers = Cons, service_queue = ServiceQueue}.
+
+maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con,
+ ServiceQueue0) ->
+ case Credit > 0 of
+ true ->
+ % consumer needs service - check if already on service queue
+ uniq_queue_in(ConsumerId, Con, ServiceQueue0);
+ false ->
+ ServiceQueue0
+ end.
+
+%% creates a dehydrated version of the current state to be cached and
+%% potentially used for a snapshot at a later point
+dehydrate_state(#?MODULE{messages = Messages,
+ consumers = Consumers,
+ returns = Returns,
+ prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0},
+ waiting_consumers = Waiting0} = State) ->
+ RCnt = lqueue:len(Returns),
+ %% TODO: optimise this function as far as possible
+ PrefRet1 = lists:foldr(fun ({'$prefix_msg', Header}, Acc) ->
+ [Header | Acc];
+ ({'$empty_msg', _} = Msg, Acc) ->
+ [Msg | Acc];
+ ({_, {_, {Header, 'empty'}}}, Acc) ->
+ [{'$empty_msg', Header} | Acc];
+ ({_, {_, {Header, _}}}, Acc) ->
+ [Header | Acc]
+ end,
+ [],
+ lqueue:to_list(Returns)),
+ PrefRet = PrefRet0 ++ PrefRet1,
+ PrefMsgsSuff = dehydrate_messages(Messages, []),
+ %% prefix messages are not populated in normal operation, only after
+ %% recovering from a snapshot
+ PrefMsgs = PrefMsg0 ++ PrefMsgsSuff,
+ Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0],
+ State#?MODULE{messages = lqueue:new(),
+ ra_indexes = rabbit_fifo_index:empty(),
+ release_cursors = lqueue:new(),
+ consumers = maps:map(fun (_, C) ->
+ dehydrate_consumer(C)
+ end, Consumers),
+ returns = lqueue:new(),
+ prefix_msgs = {PRCnt + RCnt, PrefRet,
+ PPCnt + lqueue:len(Messages), PrefMsgs},
+ waiting_consumers = Waiting}.
+
+%% TODO make body recursive to avoid allocating lists:reverse call
+dehydrate_messages(Msgs0, Acc0) ->
+ {OutRes, Msgs} = lqueue:out(Msgs0),
+ case OutRes of
+ {value, {_MsgId, {_RaftId, {_, 'empty'} = Msg}}} ->
+ dehydrate_messages(Msgs, [Msg | Acc0]);
+ {value, {_MsgId, {_RaftId, {Header, _}}}} ->
+ dehydrate_messages(Msgs, [Header | Acc0]);
+ empty ->
+ lists:reverse(Acc0)
+ end.
+
+dehydrate_consumer(#consumer{checked_out = Checked0} = Con) ->
+ Checked = maps:map(fun (_, {'$prefix_msg', _} = M) ->
+ M;
+ (_, {'$empty_msg', _} = M) ->
+ M;
+ (_, {_, {_, {Header, 'empty'}}}) ->
+ {'$empty_msg', Header};
+ (_, {_, {_, {Header, _}}}) ->
+ {'$prefix_msg', Header}
+ end, Checked0),
+ Con#consumer{checked_out = Checked}.
+
+%% make the state suitable for equality comparison
+normalize(#?MODULE{messages = Messages,
+ release_cursors = Cursors} = State) ->
+ State#?MODULE{messages = lqueue:from_list(lqueue:to_list(Messages)),
+ release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}.
+
+is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined,
+ max_bytes = undefined}}) ->
+ false;
+is_over_limit(#?MODULE{cfg = #cfg{max_length = MaxLength,
+ max_bytes = MaxBytes},
+ msg_bytes_enqueue = BytesEnq} = State) ->
+ messages_ready(State) > MaxLength orelse (BytesEnq > MaxBytes).
+
+is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = undefined,
+ max_bytes = undefined}}) ->
+ false;
+is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = MaxLength,
+ max_bytes = MaxBytes},
+ msg_bytes_enqueue = BytesEnq} = State) ->
+ is_below(MaxLength, messages_ready(State)) andalso
+ is_below(MaxBytes, BytesEnq).
+
+is_below(undefined, _Num) ->
+ true;
+is_below(Val, Num) when is_integer(Val) andalso is_integer(Num) ->
+ Num =< trunc(Val * ?LOW_LIMIT).
+
+-spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> protocol().
+make_enqueue(Pid, Seq, Msg) ->
+ #enqueue{pid = Pid, seq = Seq, msg = Msg}.
+
+-spec make_register_enqueuer(pid()) -> protocol().
+make_register_enqueuer(Pid) ->
+ #register_enqueuer{pid = Pid}.
+
+-spec make_checkout(consumer_id(),
+ checkout_spec(), consumer_meta()) -> protocol().
+make_checkout(ConsumerId, Spec, Meta) ->
+ #checkout{consumer_id = ConsumerId,
+ spec = Spec, meta = Meta}.
+
+-spec make_settle(consumer_id(), [msg_id()]) -> protocol().
+make_settle(ConsumerId, MsgIds) when is_list(MsgIds) ->
+ #settle{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_return(consumer_id(), [msg_id()]) -> protocol().
+make_return(ConsumerId, MsgIds) ->
+ #return{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_discard(consumer_id(), [msg_id()]) -> protocol().
+make_discard(ConsumerId, MsgIds) ->
+ #discard{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_credit(consumer_id(), non_neg_integer(), non_neg_integer(),
+ boolean()) -> protocol().
+make_credit(ConsumerId, Credit, DeliveryCount, Drain) ->
+ #credit{consumer_id = ConsumerId,
+ credit = Credit,
+ delivery_count = DeliveryCount,
+ drain = Drain}.
+
+-spec make_purge() -> protocol().
+make_purge() -> #purge{}.
+
+-spec make_garbage_collection() -> protocol().
+make_garbage_collection() -> #garbage_collection{}.
+
+-spec make_purge_nodes([node()]) -> protocol().
+make_purge_nodes(Nodes) ->
+ #purge_nodes{nodes = Nodes}.
+
+-spec make_update_config(config()) -> protocol().
+make_update_config(Config) ->
+ #update_config{config = Config}.
+
+add_bytes_enqueue(Bytes,
+ #?MODULE{msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_enqueue = Enqueue + Bytes};
+add_bytes_enqueue(#{size := Bytes}, State) ->
+ add_bytes_enqueue(Bytes, State).
+
+add_bytes_drop(Bytes,
+ #?MODULE{msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_enqueue = Enqueue - Bytes};
+add_bytes_drop(#{size := Bytes}, State) ->
+ add_bytes_drop(Bytes, State).
+
+add_bytes_checkout(Bytes,
+ #?MODULE{msg_bytes_checkout = Checkout,
+ msg_bytes_enqueue = Enqueue } = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_checkout = Checkout + Bytes,
+ msg_bytes_enqueue = Enqueue - Bytes};
+add_bytes_checkout(#{size := Bytes}, State) ->
+ add_bytes_checkout(Bytes, State).
+
+add_bytes_settle(Bytes,
+ #?MODULE{msg_bytes_checkout = Checkout} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_checkout = Checkout - Bytes};
+add_bytes_settle(#{size := Bytes}, State) ->
+ add_bytes_settle(Bytes, State).
+
+add_bytes_return(Bytes,
+ #?MODULE{msg_bytes_checkout = Checkout,
+ msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_checkout = Checkout - Bytes,
+ msg_bytes_enqueue = Enqueue + Bytes};
+add_bytes_return(#{size := Bytes}, State) ->
+ add_bytes_return(Bytes, State).
+
+add_in_memory_counts(Bytes,
+ #?MODULE{msg_bytes_in_memory = InMemoryBytes,
+ msgs_ready_in_memory = InMemoryCount} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_in_memory = InMemoryBytes + Bytes,
+ msgs_ready_in_memory = InMemoryCount + 1};
+add_in_memory_counts(#{size := Bytes}, State) ->
+ add_in_memory_counts(Bytes, State).
+
+subtract_in_memory_counts(Bytes,
+ #?MODULE{msg_bytes_in_memory = InMemoryBytes,
+ msgs_ready_in_memory = InMemoryCount} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_in_memory = InMemoryBytes - Bytes,
+ msgs_ready_in_memory = InMemoryCount - 1};
+subtract_in_memory_counts(#{size := Bytes}, State) ->
+ subtract_in_memory_counts(Bytes, State).
+
+message_size(#basic_message{content = Content}) ->
+ #content{payload_fragments_rev = PFR} = Content,
+ iolist_size(PFR);
+message_size({'$prefix_msg', H}) ->
+ get_size_from_header(H);
+message_size({'$empty_msg', H}) ->
+ get_size_from_header(H);
+message_size(B) when is_binary(B) ->
+ byte_size(B);
+message_size(Msg) ->
+ %% probably only hit this for testing so ok to use erts_debug
+ erts_debug:size(Msg).
+
+get_size_from_header(Size) when is_integer(Size) ->
+ Size;
+get_size_from_header(#{size := B}) ->
+ B.
+
+
+all_nodes(#?MODULE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Nodes0 = maps:fold(fun({_, P}, _, Acc) ->
+ Acc#{node(P) => ok}
+ end, #{}, Cons0),
+ Nodes1 = maps:fold(fun(P, _, Acc) ->
+ Acc#{node(P) => ok}
+ end, Nodes0, Enqs0),
+ maps:keys(
+ lists:foldl(fun({{_, P}, _}, Acc) ->
+ Acc#{node(P) => ok}
+ end, Nodes1, WaitingConsumers0)).
+
+all_pids_for(Node, #?MODULE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Cons = maps:fold(fun({_, P}, _, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, [], Cons0),
+ Enqs = maps:fold(fun(P, _, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, Cons, Enqs0),
+ lists:foldl(fun({{_, P}, _}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, Acc) -> Acc
+ end, Enqs, WaitingConsumers0).
+
+suspected_pids_for(Node, #?MODULE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Cons = maps:fold(fun({_, P}, #consumer{status = suspected_down}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, [], Cons0),
+ Enqs = maps:fold(fun(P, #enqueuer{status = suspected_down}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, Cons, Enqs0),
+ lists:foldl(fun({{_, P},
+ #consumer{status = suspected_down}}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, Acc) -> Acc
+ end, Enqs, WaitingConsumers0).
+
+is_expired(Ts, #?MODULE{cfg = #cfg{expires = Expires},
+ last_active = LastActive,
+ consumers = Consumers})
+ when is_number(LastActive) andalso is_number(Expires) ->
+ %% TODO: should it be active consumers?
+ Active = maps:filter(fun (_, #consumer{status = suspected_down}) ->
+ false;
+ (_, _) ->
+ true
+ end, Consumers),
+
+ Ts > (LastActive + Expires) andalso maps:size(Active) == 0;
+is_expired(_Ts, _State) ->
+ false.
+
+get_priority_from_args(#{args := Args}) ->
+ case rabbit_misc:table_lookup(Args, <<"x-priority">>) of
+ {_Key, Value} ->
+ Value;
+ _ -> 0
+ end;
+get_priority_from_args(_) ->
+ 0.
diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl
new file mode 100644
index 0000000000..a63483becd
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo.hrl
@@ -0,0 +1,210 @@
+
+-type option(T) :: undefined | T.
+
+-type raw_msg() :: term().
+%% The raw message. It is opaque to rabbit_fifo.
+
+-type msg_in_id() :: non_neg_integer().
+% a queue scoped monotonically incrementing integer used to enforce order
+% in the unassigned messages map
+
+-type msg_id() :: non_neg_integer().
+%% A consumer-scoped monotonically incrementing integer included with a
+%% {@link delivery/0.}. Used to settle deliveries using
+%% {@link rabbit_fifo_client:settle/3.}
+
+-type msg_seqno() :: non_neg_integer().
+%% A sender process scoped monotonically incrementing integer included
+%% in enqueue messages. Used to ensure ordering of messages sent from the
+%% same process.
+
+-type msg_header() :: msg_size() |
+ #{size := msg_size(),
+ delivery_count => non_neg_integer()}.
+%% The message header:
+%% delivery_count: the number of unsuccessful delivery attempts.
+%% A non-zero value indicates a previous attempt.
+%% If it only contains the size, the header can be condensed to just the integer.
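+%%
+%% Illustrative example: for a 12 byte message the condensed header is just
+%% `12', while after one unsuccessful delivery attempt it would be
+%% `#{size => 12, delivery_count => 1}'.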
+
+-type msg() :: {msg_header(), raw_msg()}.
+%% message with a header map.
+
+-type msg_size() :: non_neg_integer().
+%% the size in bytes of the msg payload
+
+-type indexed_msg() :: {ra:index(), msg()}.
+
+-type prefix_msg() :: {'$prefix_msg', msg_header()}.
+
+-type delivery_msg() :: {msg_id(), msg()}.
+%% A tuple consisting of the message id and the headered message.
+
+-type consumer_tag() :: binary().
+%% An arbitrary binary tag used to distinguish between different consumers
+%% set up by the same process. See: {@link rabbit_fifo_client:checkout/3.}
+
+-type delivery() :: {delivery, consumer_tag(), [delivery_msg()]}.
+%% Represents the delivery of one or more rabbit_fifo messages.
+
+-type consumer_id() :: {consumer_tag(), pid()}.
+%% The entity that receives messages. Uniquely identifies a consumer.
+
+-type credit_mode() :: simple_prefetch | credited.
+%% determines how credit is replenished
+
+-type checkout_spec() :: {once | auto, Num :: non_neg_integer(),
+ credit_mode()} |
+ {dequeue, settled | unsettled} |
+ cancel.
+
+-type consumer_meta() :: #{ack => boolean(),
+ username => binary(),
+ prefetch => non_neg_integer(),
+ args => list()}.
+%% static meta data associated with a consumer
+
+
+-type applied_mfa() :: {module(), atom(), list()}.
+% represents a partially applied module call
+
+-define(RELEASE_CURSOR_EVERY, 2048).
+-define(RELEASE_CURSOR_EVERY_MAX, 3200000).
+-define(USE_AVG_HALF_LIFE, 10000.0).
+%% an average QQ without any message uses about 100KB so setting this limit
+%% to ~20 times that should be relatively safe.
+-define(GC_MEM_LIMIT_B, 2000000).
+
+-define(MB, 1048576).
+-define(LOW_LIMIT, 0.8).
+
+-record(consumer,
+ {meta = #{} :: consumer_meta(),
+ checked_out = #{} :: #{msg_id() => {msg_in_id(), indexed_msg()}},
+ next_msg_id = 0 :: msg_id(), % part of snapshot data
+ %% max number of messages that can be sent
+ %% decremented for each delivery
+ credit = 0 :: non_neg_integer(),
+ %% total number of checked out messages - ever
+ %% incremented for each delivery
+ delivery_count = 0 :: non_neg_integer(),
+ %% the mode of how credit is incremented
+ %% simple_prefetch: credit is re-filled as deliveries are settled
+ %% or returned.
+ %% credited: credit can only be changed by receiving a consumer_credit
+ %% command: `{consumer_credit, ReceiverDeliveryCount, Credit}'
+ credit_mode = simple_prefetch :: credit_mode(), % part of snapshot data
+ lifetime = once :: once | auto,
+ status = up :: up | suspected_down | cancelled,
+ priority = 0 :: non_neg_integer()
+ }).
+
+-type consumer() :: #consumer{}.
+
+-type consumer_strategy() :: competing | single_active.
+
+-type milliseconds() :: non_neg_integer().
+
+-record(enqueuer,
+ {next_seqno = 1 :: msg_seqno(),
+ % out of order enqueues - sorted list
+ pending = [] :: [{msg_seqno(), ra:index(), raw_msg()}],
+ status = up :: up |
+ suspected_down,
+ %% it is useful to have a record of when this was blocked
+ %% so that we can retry sending the block effect if
+ %% the publisher did not receive the initial one
+ blocked :: undefined | ra:index(),
+ unused_1,
+ unused_2
+ }).
+
+-record(cfg,
+ {name :: atom(),
+ resource :: rabbit_types:r('queue'),
+ release_cursor_interval :: option({non_neg_integer(), non_neg_integer()}),
+ dead_letter_handler :: option(applied_mfa()),
+ become_leader_handler :: option(applied_mfa()),
+ overflow_strategy = drop_head :: drop_head | reject_publish,
+ max_length :: option(non_neg_integer()),
+ max_bytes :: option(non_neg_integer()),
+ %% whether single active consumer is on or not for this queue
+ consumer_strategy = competing :: consumer_strategy(),
+ %% the maximum number of unsuccessful delivery attempts permitted
+ delivery_limit :: option(non_neg_integer()),
+ max_in_memory_length :: option(non_neg_integer()),
+ max_in_memory_bytes :: option(non_neg_integer()),
+ expires :: undefined | milliseconds(),
+ unused_1,
+ unused_2
+ }).
+
+-type prefix_msgs() :: {list(), list()} |
+ {non_neg_integer(), list(),
+ non_neg_integer(), list()}.
+
+-record(rabbit_fifo,
+ {cfg :: #cfg{},
+ % unassigned messages
+ messages = lqueue:new() :: lqueue:lqueue({msg_in_id(), indexed_msg()}),
+ % defines the next message id
+ next_msg_num = 1 :: msg_in_id(),
+ % queue of returned msg_in_ids - when checking out it picks from here first
+ returns = lqueue:new() :: lqueue:lqueue(prefix_msg() |
+ {msg_in_id(), indexed_msg()}),
+ % a counter of enqueues - used to trigger shadow copy points
+ enqueue_count = 0 :: non_neg_integer(),
+ % a map containing all the live processes that have ever enqueued
+ % a message to this queue as well as a cached value of the smallest
+ % ra_index of all pending enqueues
+ enqueuers = #{} :: #{pid() => #enqueuer{}},
+ % master index of all enqueue raft indexes including pending
+ % enqueues
+ % rabbit_fifo_index can be slow when calculating the smallest
+ % index when there are large gaps but should be faster than gb_trees
+ % for normal appending operations as it's backed by a map
+ ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(),
+ release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor,
+ ra:index(), #rabbit_fifo{}}),
+ % consumers need to reflect consumer state at time of snapshot
+ % needs to be part of snapshot
+ consumers = #{} :: #{consumer_id() => #consumer{}},
+ % consumers that require further service are queued here
+ % needs to be part of snapshot
+ service_queue = priority_queue:new() :: priority_queue:q(),
+ %% This is a special field that is only used for snapshots
+ %% It represents the queued messages at the time the
+ %% dehydrated snapshot state was cached.
+ %% As release_cursors are only emitted for raft indexes where all
+ %% prior messages no longer contribute to the current state we can
+ %% replace all message payloads with their sizes (to be used for
+ %% overflow calculations).
+ %% This is done so that consumers are still served in a deterministic
+ %% order on recovery.
+ prefix_msgs = {0, [], 0, []} :: prefix_msgs(),
+ msg_bytes_enqueue = 0 :: non_neg_integer(),
+ msg_bytes_checkout = 0 :: non_neg_integer(),
+ %% waiting consumers; one is picked when the active consumer is cancelled or dies
+ %% used only when single active consumer is on
+ waiting_consumers = [] :: [{consumer_id(), consumer()}],
+ msg_bytes_in_memory = 0 :: non_neg_integer(),
+ msgs_ready_in_memory = 0 :: non_neg_integer(),
+ last_active :: undefined | non_neg_integer(),
+ unused_1,
+ unused_2
+ }).
+
+-type config() :: #{name := atom(),
+ queue_resource := rabbit_types:r('queue'),
+ dead_letter_handler => applied_mfa(),
+ become_leader_handler => applied_mfa(),
+ release_cursor_interval => non_neg_integer(),
+ max_length => non_neg_integer(),
+ max_bytes => non_neg_integer(),
+ max_in_memory_length => non_neg_integer(),
+ max_in_memory_bytes => non_neg_integer(),
+ overflow_strategy => drop_head | reject_publish,
+ single_active_consumer_on => boolean(),
+ delivery_limit => non_neg_integer(),
+ expires => non_neg_integer(),
+ created => non_neg_integer()
+ }.
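+
+%% Illustrative example (values are hypothetical): a config map of this shape
+%% might look like the following. `rabbit_misc:r/3' builds the queue resource.
+%%
+%% #{name => my_quorum_queue,
+%%   queue_resource => rabbit_misc:r(<<"/">>, queue, <<"my_quorum_queue">>),
+%%   max_length => 100000,
+%%   max_bytes => 104857600, %% 100 MiB
+%%   overflow_strategy => drop_head,
+%%   single_active_consumer_on => false,
+%%   delivery_limit => 10}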
diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl
new file mode 100644
index 0000000000..3990222b15
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo_client.erl
@@ -0,0 +1,888 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @doc Provides an easy to consume API for interacting with the {@link rabbit_fifo.}
+%% state machine implementation running inside a `ra' raft system.
+%%
+%% Handles command tracking and other non-functional concerns.
+-module(rabbit_fifo_client).
+
+-export([
+ init/2,
+ init/3,
+ init/5,
+ checkout/5,
+ cancel_checkout/2,
+ enqueue/2,
+ enqueue/3,
+ dequeue/3,
+ settle/3,
+ return/3,
+ discard/3,
+ credit/4,
+ handle_ra_event/3,
+ untracked_enqueue/2,
+ purge/1,
+ cluster_name/1,
+ update_machine_state/2,
+ pending_size/1,
+ stat/1,
+ stat/2
+ ]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(SOFT_LIMIT, 32).
+-define(TIMER_TIME, 10000).
+
+-type seq() :: non_neg_integer().
+%% last_applied is initialised to -1
+-type maybe_seq() :: integer().
+-type action() :: {send_credit_reply, Available :: non_neg_integer()} |
+ {send_drained, CTagCredit ::
+ {rabbit_fifo:consumer_tag(), non_neg_integer()}}.
+-type actions() :: [action()].
+
+-type cluster_name() :: rabbit_types:r(queue).
+
+-record(consumer, {last_msg_id :: seq() | -1,
+ ack = false :: boolean(),
+ delivery_count = 0 :: non_neg_integer()}).
+
+-record(cfg, {cluster_name :: cluster_name(),
+ servers = [] :: [ra:server_id()],
+ soft_limit = ?SOFT_LIMIT :: non_neg_integer(),
+ block_handler = fun() -> ok end :: fun(() -> term()),
+ unblock_handler = fun() -> ok end :: fun(() -> ok),
+ timeout :: non_neg_integer(),
+ version = 0 :: non_neg_integer()}).
+
+-record(state, {cfg :: #cfg{},
+ leader :: undefined | ra:server_id(),
+ queue_status :: undefined | go | reject_publish,
+ next_seq = 0 :: seq(),
+ %% last_applied is initialised to -1 to note that no command has yet been
+ %% applied, whilst still allowing messages to be resent if the first ones
+ %% in the sequence are lost (messages are sent from last_applied + 1)
+ last_applied = -1 :: maybe_seq(),
+ next_enqueue_seq = 1 :: seq(),
+ %% indicates that we've exceeded the soft limit
+ slow = false :: boolean(),
+ unsent_commands = #{} :: #{rabbit_fifo:consumer_id() =>
+ {[seq()], [seq()], [seq()]}},
+ pending = #{} :: #{seq() =>
+ {term(), rabbit_fifo:command()}},
+ consumer_deliveries = #{} :: #{rabbit_fifo:consumer_tag() =>
+ #consumer{}},
+ timer_state :: term()
+ }).
+
+-opaque state() :: #state{}.
+
+-export_type([
+ state/0,
+ actions/0
+ ]).
+
+
+%% @doc Create the initial state for a new rabbit_fifo session. A state is needed
+%% to interact with a rabbit_fifo queue using @module.
+%% @param ClusterName the id of the cluster to interact with
+%% @param Servers The known servers of the queue. If the current leader is known
+%% ensure the leader node is at the head of the list.
+-spec init(cluster_name(), [ra:server_id()]) -> state().
+init(ClusterName, Servers) ->
+ init(ClusterName, Servers, ?SOFT_LIMIT).
+
+%% @doc Create the initial state for a new rabbit_fifo session. A state is needed
+%% to interact with a rabbit_fifo queue using @module.
+%% @param ClusterName the id of the cluster to interact with
+%% @param Servers The known servers of the queue. If the current leader is known
+%% ensure the leader node is at the head of the list.
+%% @param SoftLimit the soft limit on the number of pending commands before
+%% the `slow' tag is returned from commands such as `enqueue/3'.
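+%%
+%% Illustrative usage (a sketch; the queue resource and ra server id are
+%% hypothetical):
+%% ```
+%% QName = rabbit_misc:r(<<"/">>, queue, <<"qq">>),
+%% State0 = rabbit_fifo_client:init(QName, [{qq_ra_server, node()}], 32)
+%% '''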
+-spec init(cluster_name(), [ra:server_id()], non_neg_integer()) -> state().
+init(ClusterName = #resource{}, Servers, SoftLimit) ->
+ Timeout = application:get_env(kernel, net_ticktime, 60) + 5,
+ #state{cfg = #cfg{cluster_name = ClusterName,
+ servers = Servers,
+ soft_limit = SoftLimit,
+ timeout = Timeout * 1000}}.
+
+-spec init(cluster_name(), [ra:server_id()], non_neg_integer(), fun(() -> ok),
+ fun(() -> ok)) -> state().
+init(ClusterName = #resource{}, Servers, SoftLimit, BlockFun, UnblockFun) ->
+ %% net ticktime is in seconds
+ Timeout = application:get_env(kernel, net_ticktime, 60) + 5,
+ #state{cfg = #cfg{cluster_name = ClusterName,
+ servers = Servers,
+ block_handler = BlockFun,
+ unblock_handler = UnblockFun,
+ soft_limit = SoftLimit,
+ timeout = Timeout * 1000}}.
+
+
+%% @doc Enqueues a message.
+%% @param Correlation an arbitrary erlang term used to correlate this
+%% command when it has been applied.
+%% @param Msg an arbitrary erlang term representing the message.
+%% @param State the current {@module} state.
+%% @returns
+%% `{ok | slow, State}' if the command was successfully sent. If the return
+%% tag is `slow' it means the limit is approaching and it is time to slow down
+%% the sending rate.
+%% {@module} assigns a sequence number to every raft command it issues. The
+%% SequenceNumber can be correlated to the applied sequence numbers returned
+%% by the {@link handle_ra_event/2. handle_ra_event/2} function.
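+%%
+%% Illustrative usage (a sketch; assumes `State0' was obtained via {@link init/2}):
+%% ```
+%% case rabbit_fifo_client:enqueue(my_correlation, <<"payload">>, State0) of
+%%     {ok, State} -> State;
+%%     {slow, State} -> State;            %% soft limit reached, slow down
+%%     {reject_publish, State} -> State   %% queue is currently rejecting publishes
+%% end
+%% '''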
+-spec enqueue(Correlation :: term(), Msg :: term(), State :: state()) ->
+ {ok | slow | reject_publish, state()}.
+enqueue(Correlation, Msg,
+ #state{queue_status = undefined,
+ next_enqueue_seq = 1,
+ cfg = #cfg{timeout = Timeout}} = State0) ->
+ %% it is the first enqueue, check the version
+ {_, Node} = Server = pick_server(State0),
+ case rpc:call(Node, ra_machine, version, [{machine, rabbit_fifo, #{}}]) of
+ 0 ->
+ %% the leader is running the old version
+ %% so we can't initialize the enqueuer session safely
+ %% fall back on the old behaviour
+ enqueue(Correlation, Msg, State0#state{queue_status = go});
+ 1 ->
+ %% we're running the new version on the leader, do a synchronous initialisation
+ %% of enqueuer session
+ Reg = rabbit_fifo:make_register_enqueuer(self()),
+ case ra:process_command(Server, Reg, Timeout) of
+ {ok, reject_publish, _} ->
+ {reject_publish, State0#state{queue_status = reject_publish}};
+ {ok, ok, _} ->
+ enqueue(Correlation, Msg, State0#state{queue_status = go});
+ {timeout, _} ->
+ %% if we timeout it is probably better to reject
+ %% the message than being uncertain
+ {reject_publish, State0};
+ Err ->
+ exit(Err)
+ end;
+ {badrpc, nodedown} ->
+ {reject_publish, State0}
+ end;
+enqueue(_Correlation, _Msg,
+ #state{queue_status = reject_publish,
+ cfg = #cfg{}} = State) ->
+ {reject_publish, State};
+enqueue(Correlation, Msg,
+ #state{slow = Slow,
+ queue_status = go,
+ cfg = #cfg{block_handler = BlockFun}} = State0) ->
+ Node = pick_server(State0),
+ {Next, State1} = next_enqueue_seq(State0),
+ % by default there is no correlation id
+ Cmd = rabbit_fifo:make_enqueue(self(), Next, Msg),
+ case send_command(Node, Correlation, Cmd, low, State1) of
+ {slow, State} when not Slow ->
+ BlockFun(),
+ {slow, set_timer(State)};
+ Any ->
+ Any
+ end.
+
+%% @doc Enqueues a message.
+%% @param Msg an arbitrary erlang term representing the message.
+%% @param State the current {@module} state.
+%% @returns
+%% `{ok | slow, State}' if the command was successfully sent. If the return
+%% tag is `slow' it means the limit is approaching and it is time to slow down
+%% the sending rate.
+%% {@module} assigns a sequence number to every raft command it issues. The
+%% SequenceNumber can be correlated to the applied sequence numbers returned
+%% by the {@link handle_ra_event/2. handle_ra_event/2} function.
+%%
+-spec enqueue(Msg :: term(), State :: state()) ->
+ {ok | slow | reject_publish, state()}.
+enqueue(Msg, State) ->
+ enqueue(undefined, Msg, State).
+
+%% @doc Dequeue a message from the queue.
+%%
+%% This is a synchronous call. I.e. the call will block until the command
+%% has been accepted by the ra process or it times out.
+%%
+%% @param ConsumerTag a unique tag to identify this particular consumer.
+%% @param Settlement either `settled' or `unsettled'. When `settled' no
+%% further settlement needs to be done.
+%% @param State The {@module} state.
+%%
+%% @returns `{ok, IdMsg, State}' or `{error | timeout, term()}'
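+%%
+%% Illustrative usage (a sketch; `State0' assumed to come from {@link init/2}):
+%% ```
+%% case rabbit_fifo_client:dequeue(<<"ctag">>, settled, State0) of
+%%     {ok, MsgsReady, DelMsg, State} -> {DelMsg, MsgsReady, State};
+%%     {empty, State} -> {empty, State};
+%%     Err -> Err
+%% end
+%% '''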
+-spec dequeue(rabbit_fifo:consumer_tag(),
+ Settlement :: settled | unsettled, state()) ->
+ {ok, non_neg_integer(), term(), non_neg_integer()}
+ | {empty, state()} | {error | timeout, term()}.
+dequeue(ConsumerTag, Settlement,
+ #state{cfg = #cfg{timeout = Timeout,
+ cluster_name = QName}} = State0) ->
+ Node = pick_server(State0),
+ ConsumerId = consumer_id(ConsumerTag),
+ case ra:process_command(Node,
+ rabbit_fifo:make_checkout(ConsumerId,
+ {dequeue, Settlement},
+ #{}),
+ Timeout) of
+ {ok, {dequeue, empty}, Leader} ->
+ {empty, State0#state{leader = Leader}};
+ {ok, {dequeue, {MsgId, {MsgHeader, Msg0}}, MsgsReady}, Leader} ->
+ Count = case MsgHeader of
+ #{delivery_count := C} -> C;
+ _ -> 0
+ end,
+ IsDelivered = Count > 0,
+ Msg = add_delivery_count_header(Msg0, Count),
+ {ok, MsgsReady,
+ {QName, qref(Leader), MsgId, IsDelivered, Msg},
+ State0#state{leader = Leader}};
+ {ok, {error, _} = Err, _Leader} ->
+ Err;
+ Err ->
+ Err
+ end.
+
+add_delivery_count_header(#basic_message{} = Msg0, Count)
+ when is_integer(Count) ->
+ rabbit_basic:add_header(<<"x-delivery-count">>, long, Count, Msg0);
+add_delivery_count_header(Msg, _Count) ->
+ Msg.
+
+
+%% @doc Settle a message. Permanently removes message from the queue.
+%% @param ConsumerTag the tag uniquely identifying the consumer.
+%% @param MsgIds the message ids received with the {@link rabbit_fifo:delivery/0.}
+%% @param State the {@module} state
+%% @returns
+%% `{ok | slow, State}' if the command was successfully sent. If the return
+%% tag is `slow' it means the limit is approaching and it is time to slow down
+%% the sending rate.
+%%
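+%% Illustrative usage (a sketch), settling the ids received in a delivery:
+%% ```
+%% {State, _Actions} = rabbit_fifo_client:settle(<<"ctag">>, MsgIds, State0)
+%% '''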
+-spec settle(rabbit_fifo:consumer_tag(), [rabbit_fifo:msg_id()], state()) ->
+ {state(), list()}.
+settle(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) ->
+ Node = pick_server(State0),
+ Cmd = rabbit_fifo:make_settle(consumer_id(ConsumerTag), MsgIds),
+ case send_command(Node, undefined, Cmd, normal, State0) of
+ {_, S} ->
+ % turn slow into ok for this function
+ {S, []}
+ end;
+settle(ConsumerTag, [_|_] = MsgIds,
+ #state{unsent_commands = Unsent0} = State0) ->
+ ConsumerId = consumer_id(ConsumerTag),
+ %% we've reached the soft limit so will stash the command to be
+ %% sent once we have seen enough notifications
+ Unsent = maps:update_with(ConsumerId,
+ fun ({Settles, Returns, Discards}) ->
+ {Settles ++ MsgIds, Returns, Discards}
+ end, {MsgIds, [], []}, Unsent0),
+ {State0#state{unsent_commands = Unsent}, []}.
+
+%% @doc Return a message to the queue.
+%% @param ConsumerTag the tag uniquely identifying the consumer.
+%% @param MsgIds the message ids to return received
+%% from {@link rabbit_fifo:delivery/0.}
+%% @param State the {@module} state
+%% @returns
+%% `{ok | slow, State}' if the command was successfully sent. If the return
+%% tag is `slow' it means the limit is approaching and it is time to slow down
+%% the sending rate.
+%%
+-spec return(rabbit_fifo:consumer_tag(), [rabbit_fifo:msg_id()], state()) ->
+ {state(), list()}.
+return(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) ->
+ Node = pick_server(State0),
+ % TODO: make rabbit_fifo return support lists of message ids
+ Cmd = rabbit_fifo:make_return(consumer_id(ConsumerTag), MsgIds),
+ case send_command(Node, undefined, Cmd, normal, State0) of
+ {_, S} ->
+ {S, []}
+ end;
+return(ConsumerTag, [_|_] = MsgIds,
+ #state{unsent_commands = Unsent0} = State0) ->
+ ConsumerId = consumer_id(ConsumerTag),
+ %% we've reached the soft limit so will stash the command to be
+ %% sent once we have seen enough notifications
+ Unsent = maps:update_with(ConsumerId,
+ fun ({Settles, Returns, Discards}) ->
+ {Settles, Returns ++ MsgIds, Discards}
+ end, {[], MsgIds, []}, Unsent0),
+ {State0#state{unsent_commands = Unsent}, []}.
+
+%% @doc Discards a checked out message.
+%% If the queue has a dead_letter_handler configured this will be called.
+%% @param ConsumerTag the tag uniquely identifying the consumer.
+%% @param MsgIds the message ids to discard
+%% from {@link rabbit_fifo:delivery/0.}
+%% @param State the {@module} state
+%% @returns
+%% `{ok | slow, State}' if the command was successfully sent. If the return
+%% tag is `slow' it means the limit is approaching and it is time to slow down
+%% the sending rate.
+-spec discard(rabbit_fifo:consumer_tag(), [rabbit_fifo:msg_id()], state()) ->
+ {state(), list()}.
+discard(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) ->
+ Node = pick_server(State0),
+ Cmd = rabbit_fifo:make_discard(consumer_id(ConsumerTag), MsgIds),
+ case send_command(Node, undefined, Cmd, normal, State0) of
+ {_, S} ->
+ % turn slow into ok for this function
+ {S, []}
+ end;
+discard(ConsumerTag, [_|_] = MsgIds,
+ #state{unsent_commands = Unsent0} = State0) ->
+ ConsumerId = consumer_id(ConsumerTag),
+ %% we've reached the soft limit so will stash the command to be
+ %% sent once we have seen enough notifications
+ Unsent = maps:update_with(ConsumerId,
+ fun ({Settles, Returns, Discards}) ->
+ {Settles, Returns, Discards ++ MsgIds}
+ end, {[], [], MsgIds}, Unsent0),
+ {State0#state{unsent_commands = Unsent}, []}.
+
+%% @doc Register with the rabbit_fifo queue to "checkout" messages as they
+%% become available.
+%%
+%% This is a synchronous call. I.e. the call will block until the command
+%% has been accepted by the ra process or it times out.
+%%
+%% @param ConsumerTag a unique tag to identify this particular consumer.
+%% @param NumUnsettled the maximum number of in-flight messages. Once this
+%% number of messages has been received but not settled no further messages
+%% will be delivered to the consumer.
+%% @param CreditMode The credit mode to use for the checkout.
+%% simple_prefetch: credit is auto topped up as deliveries are settled
+%% credited: credit is only increased by sending credit to the queue
+%% @param State The {@module} state.
+%%
+%% @returns `{ok, State}' or `{error | timeout, term()}'
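+%%
+%% Illustrative usage (a sketch; the consumer meta values are hypothetical):
+%% ```
+%% {ok, State} = rabbit_fifo_client:checkout(<<"ctag">>, 10, simple_prefetch,
+%%                                           #{ack => true}, State0)
+%% '''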
+-spec checkout(rabbit_fifo:consumer_tag(),
+ NumUnsettled :: non_neg_integer(),
+ CreditMode :: rabbit_fifo:credit_mode(),
+ Meta :: rabbit_fifo:consumer_meta(),
+ state()) -> {ok, state()} | {error | timeout, term()}.
+checkout(ConsumerTag, NumUnsettled, CreditMode, Meta,
+ #state{consumer_deliveries = CDels0} = State0) ->
+ Servers = sorted_servers(State0),
+ ConsumerId = {ConsumerTag, self()},
+ Cmd = rabbit_fifo:make_checkout(ConsumerId,
+ {auto, NumUnsettled, CreditMode},
+ Meta),
+ %% ???
+ Ack = maps:get(ack, Meta, true),
+
+ SDels = maps:update_with(ConsumerTag,
+ fun (V) ->
+ V#consumer{ack = Ack}
+ end,
+ #consumer{last_msg_id = -1,
+ ack = Ack}, CDels0),
+ try_process_command(Servers, Cmd, State0#state{consumer_deliveries = SDels}).
+
+%% @doc Provide credit to the queue
+%%
+%% This only has an effect if the consumer uses credit mode: credited
+%% @param ConsumerTag a unique tag to identify this particular consumer.
+%% @param Credit the amount of credit to provide to the queue
+%% @param Drain tells the queue to use up any credit that cannot be immediately
+%% fulfilled. (i.e. there are not enough messages on queue to use up all the
+%% provided credit).
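+%%
+%% Illustrative usage (a sketch), granting 50 credits without draining:
+%% ```
+%% {State, _Actions} = rabbit_fifo_client:credit(<<"ctag">>, 50, false, State0)
+%% '''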
+-spec credit(rabbit_fifo:consumer_tag(),
+ Credit :: non_neg_integer(),
+ Drain :: boolean(),
+ state()) ->
+ {state(), actions()}.
+credit(ConsumerTag, Credit, Drain,
+ #state{consumer_deliveries = CDels} = State0) ->
+ ConsumerId = consumer_id(ConsumerTag),
+ %% the last received msgid provides us with the delivery count if we
+ %% add one as it is 0 indexed
+ C = maps:get(ConsumerTag, CDels, #consumer{last_msg_id = -1}),
+ Node = pick_server(State0),
+ Cmd = rabbit_fifo:make_credit(ConsumerId, Credit,
+ C#consumer.last_msg_id + 1, Drain),
+ case send_command(Node, undefined, Cmd, normal, State0) of
+ {_, S} ->
+ % turn slow into ok for this function
+ {S, []}
+ end.
+
+%% @doc Cancels a checkout with the rabbit_fifo queue for the consumer tag
+%%
+%% This is a synchronous call. I.e. the call will block until the command
+%% has been accepted by the ra process or it times out.
+%%
+%% @param ConsumerTag a unique tag to identify this particular consumer.
+%% @param State The {@module} state.
+%%
+%% @returns `{ok, State}' or `{error | timeout, term()}'
+-spec cancel_checkout(rabbit_fifo:consumer_tag(), state()) ->
+ {ok, state()} | {error | timeout, term()}.
+cancel_checkout(ConsumerTag, #state{consumer_deliveries = CDels} = State0) ->
+ Servers = sorted_servers(State0),
+ ConsumerId = {ConsumerTag, self()},
+ Cmd = rabbit_fifo:make_checkout(ConsumerId, cancel, #{}),
+ State = State0#state{consumer_deliveries = maps:remove(ConsumerTag, CDels)},
+ try_process_command(Servers, Cmd, State).
+
+%% @doc Purges all the messages from a rabbit_fifo queue and returns the number
+%% of messages purged.
+-spec purge(ra:server_id()) -> {ok, non_neg_integer()} | {error | timeout, term()}.
+purge(Node) ->
+ case ra:process_command(Node, rabbit_fifo:make_purge()) of
+ {ok, {purge, Reply}, _} ->
+ {ok, Reply};
+ Err ->
+ Err
+ end.
+
+-spec pending_size(state()) -> non_neg_integer().
+pending_size(#state{pending = Pend}) ->
+ maps:size(Pend).
+
+-spec stat(ra:server_id()) ->
+ {ok, non_neg_integer(), non_neg_integer()}
+ | {error | timeout, term()}.
+stat(Leader) ->
+ %% short timeout as we don't want to spend too long if it is going to
+ %% fail anyway
+ stat(Leader, 250).
+
+-spec stat(ra:server_id(), non_neg_integer()) ->
+ {ok, non_neg_integer(), non_neg_integer()}
+ | {error | timeout, term()}.
+stat(Leader, Timeout) ->
+ %% short timeout as we don't want to spend too long if it is going to
+ %% fail anyway
+ case ra:local_query(Leader, fun rabbit_fifo:query_stat/1, Timeout) of
+ {ok, {_, {R, C}}, _} -> {ok, R, C};
+ {error, _} = Error -> Error;
+ {timeout, _} = Error -> Error
+ end.
+
+%% @doc returns the cluster name
+-spec cluster_name(state()) -> cluster_name().
+cluster_name(#state{cfg = #cfg{cluster_name = ClusterName}}) ->
+ ClusterName.
+
+update_machine_state(Server, Conf) ->
+ case ra:process_command(Server, rabbit_fifo:make_update_config(Conf)) of
+ {ok, ok, _} ->
+ ok;
+ Err ->
+ Err
+ end.
+
+%% @doc Handles incoming `ra_events'. Events carry both internal "bookkeeping"
+%% events emitted by the `ra' leader as well as `rabbit_fifo' emitted events such
+%% as message deliveries. All ra events need to be handled by {@module}
+%% to ensure bookkeeping, resends and flow control are correctly handled.
+%%
+%% If the `ra_event' contains a `rabbit_fifo' generated message it will be returned
+%% for further processing.
+%%
+%% Example:
+%%
+%% ```
+%% receive
+%% {ra_event, From, Evt} ->
+%% case rabbit_fifo_client:handle_ra_event(From, Evt, State0) of
+%% {internal, _Seq, State} -> State;
+%% {{delivery, _ConsumerTag, Msgs}, State} ->
+%% handle_messages(Msgs),
+%% ...
+%% end
+%% end
+%% '''
+%%
+%% @param From the {@link ra:server_id().} of the sending process.
+%% @param Event the body of the `ra_event'.
+%% @param State the current {@module} state.
+%%
+%% @returns
+%% `{internal, AppliedCorrelations, State}' if the event contained an internally
+%% handled event such as a notification and a correlation was included with
+%% the command (e.g. in a call to `enqueue/3' the correlation terms are returned
+%% here.
+%%
+%% `{RaFifoEvent, State}' if the event contained a client message generated by
+%% the `rabbit_fifo' state machine such as a delivery.
+%%
+%% The type of `rabbit_fifo' client messages that can be received are:
+%%
+%% `{delivery, ConsumerTag, [{MsgId, {MsgHeader, Msg}}]}'
+%%
+%% <li>`ConsumerTag' the binary tag passed to {@link checkout/3.}</li>
+%% <li>`MsgId' is a consumer scoped monotonically incrementing id that can be
+%% used to {@link settle/3.} (roughly: AMQP 0.9.1 ack) message once finished
+%% with them.</li>
+-spec handle_ra_event(ra:server_id(), ra_server_proc:ra_event_body(), state()) ->
+ {internal, Correlators :: [term()], actions(), state()} |
+ {rabbit_fifo:client_msg(), state()} | eol.
+handle_ra_event(From, {applied, Seqs},
+ #state{cfg = #cfg{cluster_name = QRef,
+ soft_limit = SftLmt,
+ unblock_handler = UnblockFun}} = State0) ->
+
+ {Corrs, Actions0, State1} = lists:foldl(fun seq_applied/2,
+ {[], [], State0#state{leader = From}},
+ Seqs),
+ Actions = case Corrs of
+ [] ->
+ lists:reverse(Actions0);
+ _ ->
+ [{settled, QRef, Corrs}
+ | lists:reverse(Actions0)]
+ end,
+ case maps:size(State1#state.pending) < SftLmt of
+ true when State1#state.slow == true ->
+ % we have exited soft limit state
+ % send any unsent commands and cancel the time as
+ % TODO: really the timer should only be cancelled when the channel
+ % exits flow state (which depends on the state of all queues the
+ % channel is interacting with)
+ % but the fact the queue has just applied suggests
+ % it's ok to cancel here anyway
+ State2 = cancel_timer(State1#state{slow = false,
+ unsent_commands = #{}}),
+ % build up a list of commands to issue
+ Commands = maps:fold(
+ fun (Cid, {Settled, Returns, Discards}, Acc) ->
+ add_command(Cid, settle, Settled,
+ add_command(Cid, return, Returns,
+ add_command(Cid, discard,
+ Discards, Acc)))
+ end, [], State1#state.unsent_commands),
+ Node = pick_server(State2),
+ %% send all the settlements and returns
+ State = lists:foldl(fun (C, S0) ->
+ case send_command(Node, undefined,
+ C, normal, S0) of
+ {T, S} when T =/= error ->
+ S
+ end
+ end, State2, Commands),
+ UnblockFun(),
+ {ok, State, Actions};
+ _ ->
+ {ok, State1, Actions}
+ end;
+handle_ra_event(From, {machine, {delivery, _ConsumerTag, _} = Del}, State0) ->
+ handle_delivery(From, Del, State0);
+handle_ra_event(_, {machine, {queue_status, Status}},
+ #state{} = State) ->
+ %% just set the queue status
+ {ok, State#state{queue_status = Status}, []};
+handle_ra_event(Leader, {machine, leader_change},
+ #state{leader = Leader} = State) ->
+ %% leader already known
+ {ok, State, []};
+handle_ra_event(Leader, {machine, leader_change}, State0) ->
+ %% we need to update leader
+ %% and resend any pending commands
+ State = resend_all_pending(State0#state{leader = Leader}),
+ {ok, State, []};
+handle_ra_event(_From, {rejected, {not_leader, undefined, _Seq}}, State0) ->
+ % TODO: how should these be handled? re-sent on timer or try random
+ {ok, State0, []};
+handle_ra_event(_From, {rejected, {not_leader, Leader, Seq}}, State0) ->
+ State1 = State0#state{leader = Leader},
+ State = resend(Seq, State1),
+ {ok, State, []};
+handle_ra_event(_, timeout, #state{cfg = #cfg{servers = Servers}} = State0) ->
+ case find_leader(Servers) of
+ undefined ->
+ %% still no leader, set the timer again
+ {ok, set_timer(State0), []};
+ Leader ->
+ State = resend_all_pending(State0#state{leader = Leader}),
+ {ok, State, []}
+ end;
+handle_ra_event(_Leader, {machine, eol}, _State0) ->
+ eol.
+
+%% @doc Attempts to enqueue a message using cast semantics. This provides no
+%% guarantees or retries if the message fails to achieve consensus or if the
+%% server it is sent to happens not to be available. If the message is sent to a
+%% follower it will attempt to deliver it to the leader, if known, else it will
+%% drop the message.
+%%
+%% NB: only use this for non-critical enqueues where a full rabbit_fifo_client state
+%% cannot be maintained.
+%%
+%% @param Servers the known servers in the cluster.
+%% @param Msg the message to enqueue.
+%%
+%% @returns `ok'
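+%%
+%% Illustrative usage (a sketch; the ra server id is hypothetical):
+%% ```
+%% ok = rabbit_fifo_client:untracked_enqueue([{qq_ra_server, node()}], <<"payload">>)
+%% '''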
+-spec untracked_enqueue([ra:server_id()], term()) ->
+ ok.
+untracked_enqueue([Node | _], Msg) ->
+ Cmd = rabbit_fifo:make_enqueue(undefined, undefined, Msg),
+ ok = ra:pipeline_command(Node, Cmd),
+ ok.
+
+%% Internal
+
+try_process_command([Server | Rem], Cmd, State) ->
+ case ra:process_command(Server, Cmd, 30000) of
+ {ok, _, Leader} ->
+ {ok, State#state{leader = Leader}};
+ Err when length(Rem) =:= 0 ->
+ Err;
+ _ ->
+ try_process_command(Rem, Cmd, State)
+ end.
+
+seq_applied({Seq, MaybeAction},
+ {Corrs, Actions0, #state{last_applied = Last} = State0})
+ when Seq > Last ->
+ State1 = do_resends(Last+1, Seq-1, State0),
+ {Actions, State} = maybe_add_action(MaybeAction, Actions0, State1),
+ case maps:take(Seq, State#state.pending) of
+ {{undefined, _}, Pending} ->
+ {Corrs, Actions, State#state{pending = Pending,
+ last_applied = Seq}};
+ {{Corr, _}, Pending} ->
+ {[Corr | Corrs], Actions, State#state{pending = Pending,
+ last_applied = Seq}};
+ error ->
+ % must have already been resent or removed for some other reason
+ % still need to update last_applied or we may inadvertently resend
+ % stuff later
+ {Corrs, Actions, State#state{last_applied = Seq}}
+ end;
+seq_applied(_Seq, Acc) ->
+ Acc.
+
+maybe_add_action(ok, Acc, State) ->
+ {Acc, State};
+maybe_add_action({multi, Actions}, Acc0, State0) ->
+ lists:foldl(fun (Act, {Acc, State}) ->
+ maybe_add_action(Act, Acc, State)
+ end, {Acc0, State0}, Actions);
+maybe_add_action({send_drained, {Tag, Credit}} = Action, Acc,
+ #state{consumer_deliveries = CDels} = State) ->
+ %% add credit to consumer delivery_count
+ C = maps:get(Tag, CDels),
+ {[Action | Acc],
+ State#state{consumer_deliveries =
+ update_consumer(Tag, C#consumer.last_msg_id,
+ Credit, C, CDels)}};
+maybe_add_action(Action, Acc, State) ->
+ %% anything else is assumed to be an action
+ {[Action | Acc], State}.
+
+do_resends(From, To, State) when From =< To ->
+ % ?INFO("rabbit_fifo_client: doing resends From ~w To ~w~n", [From, To]),
+ lists:foldl(fun resend/2, State, lists:seq(From, To));
+do_resends(_, _, State) ->
+ State.
+
+% resends a command with a new sequence number
+resend(OldSeq, #state{pending = Pending0, leader = Leader} = State) ->
+ case maps:take(OldSeq, Pending0) of
+ {{Corr, Cmd}, Pending} ->
+ %% resends aren't subject to flow control here
+ resend_command(Leader, Corr, Cmd, State#state{pending = Pending});
+ error ->
+ State
+ end.
+
+resend_all_pending(#state{pending = Pend} = State) ->
+ Seqs = lists:sort(maps:keys(Pend)),
+ lists:foldl(fun resend/2, State, Seqs).
+
+maybe_auto_ack(true, Deliver, State0) ->
+ %% manual ack is enabled
+ {ok, State0, [Deliver]};
+maybe_auto_ack(false, {deliver, Tag, _Ack, Msgs} = Deliver, State0) ->
+ %% we have to auto ack these deliveries
+ MsgIds = [I || {_, _, I, _, _} <- Msgs],
+ {State, Actions} = settle(Tag, MsgIds, State0),
+ {ok, State, [Deliver] ++ Actions}.
+
+
+handle_delivery(Leader, {delivery, Tag, [{FstId, _} | _] = IdMsgs},
+ #state{cfg = #cfg{cluster_name = QName},
+ consumer_deliveries = CDels0} = State0) ->
+ QRef = qref(Leader),
+ {LastId, _} = lists:last(IdMsgs),
+ Consumer = #consumer{ack = Ack} = maps:get(Tag, CDels0),
+ %% format as a deliver action
+ Del = {deliver, Tag, Ack, transform_msgs(QName, QRef, IdMsgs)},
+ %% TODO: remove potential default allocation
+ case Consumer of
+ #consumer{last_msg_id = Prev} = C
+ when FstId =:= Prev+1 ->
+ maybe_auto_ack(Ack, Del,
+ State0#state{consumer_deliveries =
+ update_consumer(Tag, LastId,
+ length(IdMsgs), C,
+ CDels0)});
+ #consumer{last_msg_id = Prev} = C
+ when FstId > Prev+1 ->
+ NumMissing = FstId - Prev + 1,
+ %% there may actually be fewer missing messages returned than expected
+ %% This can happen when a node the channel is on gets disconnected
+ %% from the node the leader is on and then reconnected afterwards.
+ %% When the node is disconnected the leader will return all checked
+ %% out messages to the main queue to ensure they don't get stuck in
+ %% case the node never comes back.
+ case get_missing_deliveries(Leader, Prev+1, FstId-1, Tag) of
+ {protocol_error, _, _, _} = Err ->
+ Err;
+ Missing ->
+ XDel = {deliver, Tag, Ack, transform_msgs(QName, QRef,
+ Missing ++ IdMsgs)},
+ maybe_auto_ack(Ack, XDel,
+ State0#state{consumer_deliveries =
+ update_consumer(Tag, LastId,
+ length(IdMsgs) + NumMissing,
+ C, CDels0)})
+ end;
+ #consumer{last_msg_id = Prev}
+ when FstId =< Prev ->
+ case lists:dropwhile(fun({Id, _}) -> Id =< Prev end, IdMsgs) of
+ [] ->
+ {ok, State0, []};
+ IdMsgs2 ->
+ handle_delivery(Leader, {delivery, Tag, IdMsgs2}, State0)
+ end;
+ C when FstId =:= 0 ->
+ % the very first delivery
+ maybe_auto_ack(Ack, Del,
+ State0#state{consumer_deliveries =
+ update_consumer(Tag, LastId,
+ length(IdMsgs),
+ C#consumer{last_msg_id = LastId},
+ CDels0)})
+ end.
+
+transform_msgs(QName, QRef, Msgs) ->
+ lists:map(
+ fun({MsgId, {MsgHeader, Msg0}}) ->
+ {Msg, Redelivered} = case MsgHeader of
+ #{delivery_count := C} ->
+ {add_delivery_count_header(Msg0, C), true};
+ _ ->
+ {Msg0, false}
+ end,
+ {QName, QRef, MsgId, Redelivered, Msg}
+ end, Msgs).
+
+update_consumer(Tag, LastId, DelCntIncr,
+ #consumer{delivery_count = D} = C, Consumers) ->
+ maps:put(Tag,
+ C#consumer{last_msg_id = LastId,
+ delivery_count = D + DelCntIncr},
+ Consumers).
+
+
+get_missing_deliveries(Leader, From, To, ConsumerTag) ->
+ ConsumerId = consumer_id(ConsumerTag),
+ % ?INFO("get_missing_deliveries for ~w from ~b to ~b",
+ % [ConsumerId, From, To]),
+ Query = fun (State) ->
+ rabbit_fifo:get_checked_out(ConsumerId, From, To, State)
+ end,
+ case ra:local_query(Leader, Query) of
+ {ok, {_, Missing}, _} ->
+ Missing;
+ {error, Error} ->
+ {protocol_error, internal_error, "Cannot query missing deliveries from ~p: ~p",
+ [Leader, Error]};
+ {timeout, _} ->
+ {protocol_error, internal_error, "Cannot query missing deliveries from ~p: timeout",
+ [Leader]}
+ end.
+
+pick_server(#state{leader = undefined,
+ cfg = #cfg{servers = [N | _]}}) ->
+ %% TODO: pick at random rather than first?
+ N;
+pick_server(#state{leader = Leader}) ->
+ Leader.
+
+% servers sorted by last known leader
+sorted_servers(#state{leader = undefined,
+ cfg = #cfg{servers = Servers}}) ->
+ Servers;
+sorted_servers(#state{leader = Leader,
+ cfg = #cfg{servers = Servers}}) ->
+ [Leader | lists:delete(Leader, Servers)].
+
+next_seq(#state{next_seq = Seq} = State) ->
+ {Seq, State#state{next_seq = Seq + 1}}.
+
+next_enqueue_seq(#state{next_enqueue_seq = Seq} = State) ->
+ {Seq, State#state{next_enqueue_seq = Seq + 1}}.
+
+consumer_id(ConsumerTag) ->
+ {ConsumerTag, self()}.
+
+send_command(Server, Correlation, Command, Priority,
+ #state{pending = Pending,
+ cfg = #cfg{soft_limit = SftLmt}} = State0) ->
+ {Seq, State} = next_seq(State0),
+ ok = ra:pipeline_command(Server, Command, Seq, Priority),
+ Tag = case maps:size(Pending) >= SftLmt of
+ true -> slow;
+ false -> ok
+ end,
+ {Tag, State#state{pending = Pending#{Seq => {Correlation, Command}},
+ slow = Tag == slow}}.
+
+resend_command(Node, Correlation, Command,
+ #state{pending = Pending} = State0) ->
+ {Seq, State} = next_seq(State0),
+ ok = ra:pipeline_command(Node, Command, Seq),
+ State#state{pending = Pending#{Seq => {Correlation, Command}}}.
+
+add_command(_, _, [], Acc) ->
+ Acc;
+add_command(Cid, settle, MsgIds, Acc) ->
+ [rabbit_fifo:make_settle(Cid, MsgIds) | Acc];
+add_command(Cid, return, MsgIds, Acc) ->
+ [rabbit_fifo:make_return(Cid, MsgIds) | Acc];
+add_command(Cid, discard, MsgIds, Acc) ->
+ [rabbit_fifo:make_discard(Cid, MsgIds) | Acc].
+
+set_timer(#state{leader = Leader0,
+ cfg = #cfg{servers = [Server | _],
+ cluster_name = QName}} = State) ->
+ Leader = case Leader0 of
+ undefined -> Server;
+ _ ->
+ Leader0
+ end,
+ Ref = erlang:send_after(?TIMER_TIME, self(),
+ {'$gen_cast',
+ {queue_event, QName, {Leader, timeout}}}),
+ State#state{timer_state = Ref}.
+
+cancel_timer(#state{timer_state = undefined} = State) ->
+ State;
+cancel_timer(#state{timer_state = Ref} = State) ->
+ erlang:cancel_timer(Ref, [{async, true}, {info, false}]),
+ State#state{timer_state = undefined}.
+
+find_leader([]) ->
+ undefined;
+find_leader([Server | Servers]) ->
+ case ra:members(Server, 500) of
+ {ok, _, Leader} -> Leader;
+ _ ->
+ find_leader(Servers)
+ end.
+
+qref({Ref, _}) -> Ref;
+qref(Ref) -> Ref.
diff --git a/deps/rabbit/src/rabbit_fifo_index.erl b/deps/rabbit/src/rabbit_fifo_index.erl
new file mode 100644
index 0000000000..14ac89faff
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo_index.erl
@@ -0,0 +1,119 @@
+-module(rabbit_fifo_index).
+
+-export([
+ empty/0,
+ exists/2,
+ append/2,
+ delete/2,
+ size/1,
+ smallest/1,
+ map/2
+ ]).
+
+-compile({no_auto_import, [size/1]}).
+
+%% the empty atom is a lot smaller (4 bytes) than e.g. `undefined` (13 bytes).
+%% This matters as the data map gets persisted as part of the snapshot
+-define(NIL, '').
+
+-record(?MODULE, {data = #{} :: #{integer() => ?NIL},
+ smallest :: undefined | non_neg_integer(),
+ largest :: undefined | non_neg_integer()
+ }).
+
+
+-opaque state() :: #?MODULE{}.
+
+-export_type([state/0]).
+
+-spec empty() -> state().
+empty() ->
+ #?MODULE{}.
+
+-spec exists(integer(), state()) -> boolean().
+exists(Key, #?MODULE{data = Data}) ->
+ maps:is_key(Key, Data).
+
+% only integer keys are supported
+-spec append(integer(), state()) -> state().
+append(Key,
+ #?MODULE{data = Data,
+ smallest = Smallest,
+ largest = Largest} = State)
+ when Key > Largest orelse Largest =:= undefined ->
+ State#?MODULE{data = maps:put(Key, ?NIL, Data),
+ smallest = ra_lib:default(Smallest, Key),
+ largest = Key}.
+
+-spec delete(Index :: integer(), state()) -> state().
+delete(Smallest, #?MODULE{data = Data0,
+ largest = Largest,
+ smallest = Smallest} = State) ->
+ Data = maps:remove(Smallest, Data0),
+ case find_next(Smallest + 1, Largest, Data) of
+ undefined ->
+ State#?MODULE{data = Data,
+ smallest = undefined,
+ largest = undefined};
+ Next ->
+ State#?MODULE{data = Data, smallest = Next}
+ end;
+delete(Key, #?MODULE{data = Data} = State) ->
+ State#?MODULE{data = maps:remove(Key, Data)}.
+
+-spec size(state()) -> non_neg_integer().
+size(#?MODULE{data = Data}) ->
+ maps:size(Data).
+
+-spec smallest(state()) -> undefined | integer().
+smallest(#?MODULE{smallest = Smallest}) ->
+ Smallest.
+
+
+-spec map(fun(), state()) -> state().
+map(F, #?MODULE{data = Data} = State) ->
+ State#?MODULE{data = maps:map(F, Data)}.
+
+
+%% internal
+
+find_next(Next, Last, _Map) when Next > Last ->
+ undefined;
+find_next(Next, Last, Map) ->
+ case Map of
+ #{Next := _} ->
+ Next;
+ _ ->
+ % in degenerate cases the range here could be very large
+ % and hence this could be very slow
+ % the typical case should ideally be better
+ % assuming fifo-ish deletion of entries
+ find_next(Next+1, Last, Map)
+ end.
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+append_test() ->
+ S0 = empty(),
+ false = exists(99, S0),
+ undefined = smallest(S0),
+ 0 = size(S0),
+ S1 = append(1, S0),
+ false = exists(99, S1),
+ true = exists(1, S1),
+ 1 = size(S1),
+ 1 = smallest(S1),
+ S2 = append(2, S1),
+ true = exists(2, S2),
+ 2 = size(S2),
+ 1 = smallest(S2),
+ S3 = delete(1, S2),
+ 2 = smallest(S3),
+ 1 = size(S3),
+ S5 = delete(2, S3),
+ undefined = smallest(S5),
+ 0 = size(S5),
+ ok.
+
+-endif.
diff --git a/deps/rabbit/src/rabbit_fifo_v0.erl b/deps/rabbit/src/rabbit_fifo_v0.erl
new file mode 100644
index 0000000000..a61f42616d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo_v0.erl
@@ -0,0 +1,1961 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_fifo_v0).
+
+-behaviour(ra_machine).
+
+-compile(inline_list_funcs).
+-compile(inline).
+-compile({no_auto_import, [apply/3]}).
+
+-include("rabbit_fifo_v0.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([
+ init/1,
+ apply/3,
+ state_enter/2,
+ tick/2,
+ overview/1,
+ get_checked_out/4,
+ %% aux
+ init_aux/1,
+ handle_aux/6,
+ % queries
+ query_messages_ready/1,
+ query_messages_checked_out/1,
+ query_messages_total/1,
+ query_processes/1,
+ query_ra_indexes/1,
+ query_consumer_count/1,
+ query_consumers/1,
+ query_stat/1,
+ query_single_active_consumer/1,
+ query_in_memory_usage/1,
+ usage/1,
+
+ zero/1,
+
+ %% misc
+ dehydrate_state/1,
+ normalize/1,
+ normalize_for_v1/1,
+ %% getters for conversions
+ get_field/2,
+ get_cfg_field/2,
+
+ %% protocol helpers
+ make_enqueue/3,
+ make_checkout/3,
+ make_settle/2,
+ make_return/2,
+ make_discard/2,
+ make_credit/4,
+ make_purge/0,
+ make_purge_nodes/1,
+ make_update_config/1
+ ]).
+
+%% command records representing all the protocol actions that are supported
+-record(enqueue, {pid :: option(pid()),
+ seq :: option(msg_seqno()),
+ msg :: raw_msg()}).
+-record(checkout, {consumer_id :: consumer_id(),
+ spec :: checkout_spec(),
+ meta :: consumer_meta()}).
+-record(settle, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(return, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(discard, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(credit, {consumer_id :: consumer_id(),
+ credit :: non_neg_integer(),
+ delivery_count :: non_neg_integer(),
+ drain :: boolean()}).
+-record(purge, {}).
+-record(purge_nodes, {nodes :: [node()]}).
+-record(update_config, {config :: config()}).
+
+-opaque protocol() ::
+ #enqueue{} |
+ #checkout{} |
+ #settle{} |
+ #return{} |
+ #discard{} |
+ #credit{} |
+ #purge{} |
+ #purge_nodes{} |
+ #update_config{}.
+
+-type command() :: protocol() | ra_machine:builtin_command().
+%% all the command types supported by ra fifo
+
+-type client_msg() :: delivery().
+%% the messages `rabbit_fifo' can send to consumers.
+
+-opaque state() :: #?STATE{}.
+
+-export_type([protocol/0,
+ delivery/0,
+ command/0,
+ credit_mode/0,
+ consumer_tag/0,
+ consumer_meta/0,
+ consumer_id/0,
+ client_msg/0,
+ msg/0,
+ msg_id/0,
+ msg_seqno/0,
+ delivery_msg/0,
+ state/0,
+ config/0]).
+
+-spec init(config()) -> state().
+init(#{name := Name,
+ queue_resource := Resource} = Conf) ->
+ update_config(Conf, #?STATE{cfg = #cfg{name = Name,
+ resource = Resource}}).
+
+update_config(Conf, State) ->
+ DLH = maps:get(dead_letter_handler, Conf, undefined),
+ BLH = maps:get(become_leader_handler, Conf, undefined),
+ SHI = maps:get(release_cursor_interval, Conf, ?RELEASE_CURSOR_EVERY),
+ MaxLength = maps:get(max_length, Conf, undefined),
+ MaxBytes = maps:get(max_bytes, Conf, undefined),
+ MaxMemoryLength = maps:get(max_in_memory_length, Conf, undefined),
+ MaxMemoryBytes = maps:get(max_in_memory_bytes, Conf, undefined),
+ DeliveryLimit = maps:get(delivery_limit, Conf, undefined),
+ ConsumerStrategy = case maps:get(single_active_consumer_on, Conf, false) of
+ true ->
+ single_active;
+ false ->
+ competing
+ end,
+ Cfg = State#?STATE.cfg,
+ SHICur = case State#?STATE.cfg of
+ #cfg{release_cursor_interval = {_, C}} ->
+ C;
+ #cfg{release_cursor_interval = undefined} ->
+ SHI;
+ #cfg{release_cursor_interval = C} ->
+ C
+ end,
+
+ State#?STATE{cfg = Cfg#cfg{release_cursor_interval = {SHI, SHICur},
+ dead_letter_handler = DLH,
+ become_leader_handler = BLH,
+ max_length = MaxLength,
+ max_bytes = MaxBytes,
+ max_in_memory_length = MaxMemoryLength,
+ max_in_memory_bytes = MaxMemoryBytes,
+ consumer_strategy = ConsumerStrategy,
+ delivery_limit = DeliveryLimit}}.
+
+zero(_) ->
+ 0.
+
+% msg_ids are scoped per consumer
+% ra_indexes holds all raft indexes for enqueues currently on queue
+-spec apply(ra_machine:command_meta_data(), command(), state()) ->
+ {state(), Reply :: term(), ra_machine:effects()} |
+ {state(), Reply :: term()}.
+apply(Metadata, #enqueue{pid = From, seq = Seq,
+ msg = RawMsg}, State00) ->
+ apply_enqueue(Metadata, From, Seq, RawMsg, State00);
+apply(Meta,
+ #settle{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?STATE{consumers = Cons0} = State) ->
+ case Cons0 of
+ #{ConsumerId := Con0} ->
+ % need to increment metrics before completing as any snapshot
+ % states taken need to include them
+ complete_and_checkout(Meta, MsgIds, ConsumerId,
+ Con0, [], State);
+ _ ->
+ {State, ok}
+
+ end;
+apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?STATE{consumers = Cons0} = State0) ->
+ case Cons0 of
+ #{ConsumerId := Con0} ->
+ Discarded = maps:with(MsgIds, Con0#consumer.checked_out),
+ Effects = dead_letter_effects(rejected, Discarded, State0, []),
+ complete_and_checkout(Meta, MsgIds, ConsumerId, Con0,
+ Effects, State0);
+ _ ->
+ {State0, ok}
+ end;
+apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?STATE{consumers = Cons0} = State) ->
+ case Cons0 of
+ #{ConsumerId := #consumer{checked_out = Checked0}} ->
+ Returned = maps:with(MsgIds, Checked0),
+ return(Meta, ConsumerId, Returned, [], State);
+ _ ->
+ {State, ok}
+ end;
+apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt,
+ drain = Drain, consumer_id = ConsumerId},
+ #?STATE{consumers = Cons0,
+ service_queue = ServiceQueue0,
+ waiting_consumers = Waiting0} = State0) ->
+ case Cons0 of
+ #{ConsumerId := #consumer{delivery_count = DelCnt} = Con0} ->
+ %% this can go below 0 when credit is reduced
+ C = max(0, RemoteDelCnt + NewCredit - DelCnt),
+ %% grant the credit
+ Con1 = Con0#consumer{credit = C},
+ ServiceQueue = maybe_queue_consumer(ConsumerId, Con1,
+ ServiceQueue0),
+ Cons = maps:put(ConsumerId, Con1, Cons0),
+ {State1, ok, Effects} =
+ checkout(Meta, State0#?STATE{service_queue = ServiceQueue,
+ consumers = Cons}, []),
+ Response = {send_credit_reply, messages_ready(State1)},
+ %% by this point all checkouts for the updated credit value
+ %% should be processed so we can evaluate the drain
+ case Drain of
+ false ->
+ %% just return the result of the checkout
+ {State1, Response, Effects};
+ true ->
+ Con = #consumer{credit = PostCred} =
+ maps:get(ConsumerId, State1#?STATE.consumers),
+ %% add the outstanding credit to the delivery count
+ DeliveryCount = Con#consumer.delivery_count + PostCred,
+ Consumers = maps:put(ConsumerId,
+ Con#consumer{delivery_count = DeliveryCount,
+ credit = 0},
+ State1#?STATE.consumers),
+ Drained = Con#consumer.credit,
+ {CTag, _} = ConsumerId,
+ {State1#?STATE{consumers = Consumers},
+ %% returning a multi response with two client actions
+ %% for the channel to execute
+ {multi, [Response, {send_drained, {CTag, Drained}}]},
+ Effects}
+ end;
+ _ when Waiting0 /= [] ->
+ %% there are waiting consumers
+ case lists:keytake(ConsumerId, 1, Waiting0) of
+ {value, {_, Con0 = #consumer{delivery_count = DelCnt}}, Waiting} ->
+ %% the consumer is a waiting one
+ %% grant the credit
+ C = max(0, RemoteDelCnt + NewCredit - DelCnt),
+ Con = Con0#consumer{credit = C},
+ State = State0#?STATE{waiting_consumers =
+ [{ConsumerId, Con} | Waiting]},
+ {State, {send_credit_reply, messages_ready(State)}};
+ false ->
+ {State0, ok}
+ end;
+ _ ->
+ %% credit for unknown consumer - just ignore
+ {State0, ok}
+ end;
+apply(_, #checkout{spec = {dequeue, _}},
+ #?STATE{cfg = #cfg{consumer_strategy = single_active}} = State0) ->
+ {State0, {error, unsupported}};
+apply(#{from := From} = Meta, #checkout{spec = {dequeue, Settlement},
+ meta = ConsumerMeta,
+ consumer_id = ConsumerId},
+ #?STATE{consumers = Consumers} = State0) ->
+ Exists = maps:is_key(ConsumerId, Consumers),
+ case messages_ready(State0) of
+ 0 ->
+ {State0, {dequeue, empty}};
+ _ when Exists ->
+ %% a dequeue using the same consumer_id isn't possible at this point
+ {State0, {dequeue, empty}};
+ Ready ->
+ State1 = update_consumer(ConsumerId, ConsumerMeta,
+ {once, 1, simple_prefetch},
+ State0),
+ {success, _, MsgId, Msg, State2} = checkout_one(State1),
+ {State, Effects} = case Settlement of
+ unsettled ->
+ {_, Pid} = ConsumerId,
+ {State2, [{monitor, process, Pid}]};
+ settled ->
+ %% immediately settle the checkout
+ {State3, _, Effects0} =
+ apply(Meta, make_settle(ConsumerId, [MsgId]),
+ State2),
+ {State3, Effects0}
+ end,
+ case Msg of
+ {RaftIdx, {Header, 'empty'}} ->
+ %% TODO add here new log effect with reply
+ {State, '$ra_no_reply',
+ reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From)};
+ _ ->
+ {State, {dequeue, {MsgId, Msg}, Ready-1}, Effects}
+ end
+ end;
+apply(Meta, #checkout{spec = cancel, consumer_id = ConsumerId}, State0) ->
+ {State, Effects} = cancel_consumer(ConsumerId, State0, [], consumer_cancel),
+ checkout(Meta, State, Effects);
+apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta,
+ consumer_id = {_, Pid} = ConsumerId},
+ State0) ->
+ State1 = update_consumer(ConsumerId, ConsumerMeta, Spec, State0),
+ checkout(Meta, State1, [{monitor, process, Pid}]);
+apply(#{index := RaftIdx}, #purge{},
+ #?STATE{ra_indexes = Indexes0,
+ returns = Returns,
+ messages = Messages} = State0) ->
+ Total = messages_ready(State0),
+ Indexes1 = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0,
+ [I || {I, _} <- lists:sort(maps:values(Messages))]),
+ Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes1,
+ [I || {_, {I, _}} <- lqueue:to_list(Returns)]),
+ {State, _, Effects} =
+ update_smallest_raft_index(RaftIdx,
+ State0#?STATE{ra_indexes = Indexes,
+ messages = #{},
+ returns = lqueue:new(),
+ msg_bytes_enqueue = 0,
+ prefix_msgs = {0, [], 0, []},
+ low_msg_num = undefined,
+ msg_bytes_in_memory = 0,
+ msgs_ready_in_memory = 0},
+ []),
+ %% as we're not checking out after a purge (no point) we have to
+ %% reverse the effects ourselves
+ {State, {purge, Total},
+ lists:reverse([garbage_collection | Effects])};
+apply(Meta, {down, Pid, noconnection},
+ #?STATE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = Waiting0,
+ enqueuers = Enqs0} = State0) ->
+ Node = node(Pid),
+ %% if the pid refers to an active or cancelled consumer,
+ %% mark it as suspected and return it to the waiting queue
+ {State1, Effects0} =
+ maps:fold(fun({_, P} = Cid, C0, {S0, E0})
+ when node(P) =:= Node ->
+ %% the consumer should be returned to waiting
+ %% and checked out messages should be returned
+ Effs = consumer_update_active_effects(
+ S0, Cid, C0, false, suspected_down, E0),
+ Checked = C0#consumer.checked_out,
+ Credit = increase_credit(C0, maps:size(Checked)),
+ {St, Effs1} = return_all(S0, Effs,
+ Cid, C0#consumer{credit = Credit}),
+ %% if the consumer was cancelled there is a chance it got
+ %% removed when returning hence we need to be defensive here
+ Waiting = case St#?STATE.consumers of
+ #{Cid := C} ->
+ Waiting0 ++ [{Cid, C}];
+ _ ->
+ Waiting0
+ end,
+ {St#?STATE{consumers = maps:remove(Cid, St#?STATE.consumers),
+ waiting_consumers = Waiting},
+ Effs1};
+ (_, _, S) ->
+ S
+ end, {State0, []}, Cons0),
+ WaitingConsumers = update_waiting_consumer_status(Node, State1,
+ suspected_down),
+
+ %% select a new consumer from the waiting queue and run a checkout
+ State2 = State1#?STATE{waiting_consumers = WaitingConsumers},
+ {State, Effects1} = activate_next_consumer(State2, Effects0),
+
+ %% mark any enqueuers as suspected
+ Enqs = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = suspected_down};
+ (_, E) -> E
+ end, Enqs0),
+ Effects = [{monitor, node, Node} | Effects1],
+ checkout(Meta, State#?STATE{enqueuers = Enqs}, Effects);
+apply(Meta, {down, Pid, noconnection},
+ #?STATE{consumers = Cons0,
+ enqueuers = Enqs0} = State0) ->
+ %% A node has been disconnected. This doesn't necessarily mean that
+ %% any processes on this node are down, they _may_ come back so here
+ %% we just mark them as suspected (effectively deactivated)
+ %% and return all checked out messages to the main queue for delivery to any
+ %% live consumers
+ %%
+ %% all pids for the disconnected node will be marked as suspected not just
+ %% the one we got the `down' command for
+ Node = node(Pid),
+
+ {State, Effects1} =
+ maps:fold(
+ fun({_, P} = Cid, #consumer{checked_out = Checked0,
+ status = up} = C0,
+ {St0, Eff}) when node(P) =:= Node ->
+ Credit = increase_credit(C0, map_size(Checked0)),
+ C = C0#consumer{status = suspected_down,
+ credit = Credit},
+ {St, Eff0} = return_all(St0, Eff, Cid, C),
+ Eff1 = consumer_update_active_effects(St, Cid, C, false,
+ suspected_down, Eff0),
+ {St, Eff1};
+ (_, _, {St, Eff}) ->
+ {St, Eff}
+ end, {State0, []}, Cons0),
+ Enqs = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = suspected_down};
+ (_, E) -> E
+ end, Enqs0),
+
+ % Monitor the node so that we can "unsuspect" these processes when the node
+ % comes back, then re-issue all monitors and discover the final fate of
+ % these processes
+ Effects = case maps:size(State#?STATE.consumers) of
+ 0 ->
+ [{aux, inactive}, {monitor, node, Node}];
+ _ ->
+ [{monitor, node, Node}]
+ end ++ Effects1,
+ checkout(Meta, State#?STATE{enqueuers = Enqs}, Effects);
+apply(Meta, {down, Pid, _Info}, State0) ->
+ {State, Effects} = handle_down(Pid, State0),
+ checkout(Meta, State, Effects);
+apply(Meta, {nodeup, Node}, #?STATE{consumers = Cons0,
+ enqueuers = Enqs0,
+ service_queue = SQ0} = State0) ->
+ %% A node we are monitoring has come back.
+ %% If we have suspected any processes of being
+ %% down we should now re-issue the monitors for them to detect if they're
+ %% actually down or not
+ Monitors = [{monitor, process, P}
+ || P <- suspected_pids_for(Node, State0)],
+
+ Enqs1 = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = up};
+ (_, E) -> E
+ end, Enqs0),
+ ConsumerUpdateActiveFun = consumer_active_flag_update_function(State0),
+ %% mark all consumers as up
+ {Cons1, SQ, Effects1} =
+ maps:fold(fun({_, P} = ConsumerId, C, {CAcc, SQAcc, EAcc})
+ when (node(P) =:= Node) and
+ (C#consumer.status =/= cancelled) ->
+ EAcc1 = ConsumerUpdateActiveFun(State0, ConsumerId,
+ C, true, up, EAcc),
+ update_or_remove_sub(ConsumerId,
+ C#consumer{status = up}, CAcc,
+ SQAcc, EAcc1);
+ (_, _, Acc) ->
+ Acc
+ end, {Cons0, SQ0, Monitors}, Cons0),
+ Waiting = update_waiting_consumer_status(Node, State0, up),
+ State1 = State0#?STATE{consumers = Cons1,
+ enqueuers = Enqs1,
+ service_queue = SQ,
+ waiting_consumers = Waiting},
+ {State, Effects} = activate_next_consumer(State1, Effects1),
+ checkout(Meta, State, Effects);
+apply(_, {nodedown, _Node}, State) ->
+ {State, ok};
+apply(_, #purge_nodes{nodes = Nodes}, State0) ->
+ {State, Effects} = lists:foldl(fun(Node, {S, E}) ->
+ purge_node(Node, S, E)
+ end, {State0, []}, Nodes),
+ {State, ok, Effects};
+apply(Meta, #update_config{config = Conf}, State) ->
+ checkout(Meta, update_config(Conf, State), []).
+
+purge_node(Node, State, Effects) ->
+ lists:foldl(fun(Pid, {S0, E0}) ->
+ {S, E} = handle_down(Pid, S0),
+ {S, E0 ++ E}
+ end, {State, Effects}, all_pids_for(Node, State)).
+
+%% any downs that are not noconnection
+handle_down(Pid, #?STATE{consumers = Cons0,
+ enqueuers = Enqs0} = State0) ->
+ % Remove any enqueuer for the same pid and enqueue any pending messages
+ % This should be ok as we won't see any more enqueues from this pid
+ State1 = case maps:take(Pid, Enqs0) of
+ {#enqueuer{pending = Pend}, Enqs} ->
+ lists:foldl(fun ({_, RIdx, RawMsg}, S) ->
+ enqueue(RIdx, RawMsg, S)
+ end, State0#?STATE{enqueuers = Enqs}, Pend);
+ error ->
+ State0
+ end,
+ {Effects1, State2} = handle_waiting_consumer_down(Pid, State1),
+ % return checked out messages to main queue
+ % Find the consumers for the down pid
+ DownConsumers = maps:keys(
+ maps:filter(fun({_, P}, _) -> P =:= Pid end, Cons0)),
+ lists:foldl(fun(ConsumerId, {S, E}) ->
+ cancel_consumer(ConsumerId, S, E, down)
+ end, {State2, Effects1}, DownConsumers).
+
+consumer_active_flag_update_function(#?STATE{cfg = #cfg{consumer_strategy = competing}}) ->
+ fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) ->
+ consumer_update_active_effects(State, ConsumerId, Consumer, Active,
+ ActivityStatus, Effects)
+ end;
+consumer_active_flag_update_function(#?STATE{cfg = #cfg{consumer_strategy = single_active}}) ->
+ fun(_, _, _, _, _, Effects) ->
+ Effects
+ end.
+
+handle_waiting_consumer_down(_Pid,
+ #?STATE{cfg = #cfg{consumer_strategy = competing}} = State) ->
+ {[], State};
+handle_waiting_consumer_down(_Pid,
+ #?STATE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = []} = State) ->
+ {[], State};
+handle_waiting_consumer_down(Pid,
+ #?STATE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = WaitingConsumers0} = State0) ->
+ % get cancel effects for down waiting consumers
+ Down = lists:filter(fun({{_, P}, _}) -> P =:= Pid end,
+ WaitingConsumers0),
+ Effects = lists:foldl(fun ({ConsumerId, _}, Effects) ->
+ cancel_consumer_effects(ConsumerId, State0,
+ Effects)
+ end, [], Down),
+ % update state to have only up waiting consumers
+ StillUp = lists:filter(fun({{_, P}, _}) -> P =/= Pid end,
+ WaitingConsumers0),
+ State = State0#?STATE{waiting_consumers = StillUp},
+ {Effects, State}.
+
+update_waiting_consumer_status(Node,
+ #?STATE{waiting_consumers = WaitingConsumers},
+ Status) ->
+ [begin
+ case node(Pid) of
+ Node ->
+ {ConsumerId, Consumer#consumer{status = Status}};
+ _ ->
+ {ConsumerId, Consumer}
+ end
+ end || {{_, Pid} = ConsumerId, Consumer} <- WaitingConsumers,
+ Consumer#consumer.status =/= cancelled].
+
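+%% Ra state-change hook. On becoming leader all known enqueuer and
+%% consumer pids are (re)monitored and sent a leader_change notification;
+%% on eol every known pid is sent an eol message and the file handle
+%% reservation is released.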
+-spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects().
+state_enter(leader, #?STATE{consumers = Cons,
+ enqueuers = Enqs,
+ waiting_consumers = WaitingConsumers,
+ cfg = #cfg{name = Name,
+ resource = Resource,
+ become_leader_handler = BLH},
+ prefix_msgs = {0, [], 0, []}
+ }) ->
+ % return effects to monitor all current consumers and enqueuers
+ Pids = lists:usort(maps:keys(Enqs)
+ ++ [P || {_, P} <- maps:keys(Cons)]
+ ++ [P || {{_, P}, _} <- WaitingConsumers]),
+ Mons = [{monitor, process, P} || P <- Pids],
+ Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids],
+ NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]),
+ FHReservation = [{mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}],
+ Effects = Mons ++ Nots ++ NodeMons ++ FHReservation,
+ case BLH of
+ undefined ->
+ Effects;
+ {Mod, Fun, Args} ->
+ [{mod_call, Mod, Fun, Args ++ [Name]} | Effects]
+ end;
+state_enter(eol, #?STATE{enqueuers = Enqs,
+ consumers = Custs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0),
+ WaitingConsumers1 = lists:foldl(fun({{_, P}, V}, Acc) -> Acc#{P => V} end,
+ #{}, WaitingConsumers0),
+ AllConsumers = maps:merge(Custs, WaitingConsumers1),
+ [{send_msg, P, eol, ra_event}
+ || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++
+ [{mod_call, rabbit_quorum_queue, file_handle_release_reservation, []}];
+state_enter(State, #?STATE{cfg = #cfg{resource = _Resource}}) when State =/= leader ->
+ FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []},
+ [FHReservation];
+state_enter(_, _) ->
+ %% catch all as not handling all states
+ [].
+
+
+-spec tick(non_neg_integer(), state()) -> ra_machine:effects().
+tick(_Ts, #?STATE{cfg = #cfg{name = Name,
+ resource = QName},
+ msg_bytes_enqueue = EnqueueBytes,
+ msg_bytes_checkout = CheckoutBytes} = State) ->
+ Metrics = {Name,
+ messages_ready(State),
+ num_checked_out(State), % checked out
+ messages_total(State),
+ query_consumer_count(State), % Consumers
+ EnqueueBytes,
+ CheckoutBytes},
+ [{mod_call, rabbit_quorum_queue,
+ handle_tick, [QName, Metrics, all_nodes(State)]}].
+
+-spec overview(state()) -> map().
+overview(#?STATE{consumers = Cons,
+ enqueuers = Enqs,
+ release_cursors = Cursors,
+ enqueue_count = EnqCount,
+ msg_bytes_enqueue = EnqueueBytes,
+ msg_bytes_checkout = CheckoutBytes,
+ cfg = Cfg} = State) ->
+ Conf = #{name => Cfg#cfg.name,
+ resource => Cfg#cfg.resource,
+ release_cursor_interval => Cfg#cfg.release_cursor_interval,
+ dead_lettering_enabled => undefined =/= Cfg#cfg.dead_letter_handler,
+ max_length => Cfg#cfg.max_length,
+ max_bytes => Cfg#cfg.max_bytes,
+ consumer_strategy => Cfg#cfg.consumer_strategy,
+ max_in_memory_length => Cfg#cfg.max_in_memory_length,
+ max_in_memory_bytes => Cfg#cfg.max_in_memory_bytes},
+ #{type => ?MODULE,
+ config => Conf,
+ num_consumers => maps:size(Cons),
+ num_checked_out => num_checked_out(State),
+ num_enqueuers => maps:size(Enqs),
+ num_ready_messages => messages_ready(State),
+ num_messages => messages_total(State),
+ num_release_cursors => lqueue:len(Cursors),
+ release_crusor_enqueue_counter => EnqCount,
+ enqueue_message_bytes => EnqueueBytes,
+ checkout_message_bytes => CheckoutBytes}.
+
+-spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) ->
+ [delivery_msg()].
+get_checked_out(Cid, From, To, #?STATE{consumers = Consumers}) ->
+ case Consumers of
+ #{Cid := #consumer{checked_out = Checked}} ->
+ [{K, snd(snd(maps:get(K, Checked)))}
+ || K <- lists:seq(From, To),
+ maps:is_key(K, Checked)];
+ _ ->
+ []
+ end.
+
+-record(aux_gc, {last_raft_idx = 0 :: ra:index()}).
+-record(aux, {name :: atom(),
+ utilisation :: term(),
+ gc = #aux_gc{} :: #aux_gc{}}).
+
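+%% Aux machine: keeps a per-queue utilisation figure in the public
+%% `rabbit_fifo_usage' ETS table and, via eval_gc/3, forces a full GC of
+%% the ra server process once the queue is empty, the log has advanced
+%% since the last sweep and process memory exceeds ?GC_MEM_LIMIT_B.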
+init_aux(Name) when is_atom(Name) ->
+ %% TODO: catch specific exception throw if table already exists
+ ok = ra_machine_ets:create_table(rabbit_fifo_usage,
+ [named_table, set, public,
+ {write_concurrency, true}]),
+ Now = erlang:monotonic_time(micro_seconds),
+ #aux{name = Name,
+ utilisation = {inactive, Now, 1, 1.0}}.
+
+handle_aux(_RaState, cast, Cmd, #aux{name = Name,
+ utilisation = Use0} = State0,
+ Log, MacState) ->
+ State = case Cmd of
+ _ when Cmd == active orelse Cmd == inactive ->
+ State0#aux{utilisation = update_use(Use0, Cmd)};
+ tick ->
+ true = ets:insert(rabbit_fifo_usage,
+ {Name, utilisation(Use0)}),
+ eval_gc(Log, MacState, State0);
+ eval ->
+ State0
+ end,
+ {no_reply, State, Log}.
+
+eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
+ #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) ->
+ {Idx, _} = ra_log:last_index_term(Log),
+ {memory, Mem} = erlang:process_info(self(), memory),
+ case messages_total(MacState) of
+ 0 when Idx > LastGcIdx andalso
+ Mem > ?GC_MEM_LIMIT_B ->
+ garbage_collect(),
+ {memory, MemAfter} = erlang:process_info(self(), memory),
+ rabbit_log:debug("~s: full GC sweep complete. "
+ "Process memory changed from ~.2fMB to ~.2fMB.",
+ [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
+ AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
+ _ ->
+ AuxState
+ end.
+
+%%% Queries
+
+query_messages_ready(State) ->
+ messages_ready(State).
+
+query_messages_checked_out(#?STATE{consumers = Consumers}) ->
+ maps:fold(fun (_, #consumer{checked_out = C}, S) ->
+ maps:size(C) + S
+ end, 0, Consumers).
+
+query_messages_total(State) ->
+ messages_total(State).
+
+query_processes(#?STATE{enqueuers = Enqs, consumers = Cons0}) ->
+ Cons = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Cons0),
+ maps:keys(maps:merge(Enqs, Cons)).
+
+
+query_ra_indexes(#?STATE{ra_indexes = RaIndexes}) ->
+ RaIndexes.
+
+query_consumer_count(#?STATE{consumers = Consumers,
+ waiting_consumers = WaitingConsumers}) ->
+ maps:size(Consumers) + length(WaitingConsumers).
+
+query_consumers(#?STATE{consumers = Consumers,
+ waiting_consumers = WaitingConsumers,
+ cfg = #cfg{consumer_strategy = ConsumerStrategy}} = State) ->
+ ActiveActivityStatusFun =
+ case ConsumerStrategy of
+ competing ->
+ fun(_ConsumerId,
+ #consumer{status = Status}) ->
+ case Status of
+ suspected_down ->
+ {false, Status};
+ _ ->
+ {true, Status}
+ end
+ end;
+ single_active ->
+ SingleActiveConsumer = query_single_active_consumer(State),
+ fun({Tag, Pid} = _Consumer, _) ->
+ case SingleActiveConsumer of
+ {value, {Tag, Pid}} ->
+ {true, single_active};
+ _ ->
+ {false, waiting}
+ end
+ end
+ end,
+ FromConsumers =
+ maps:fold(fun (_, #consumer{status = cancelled}, Acc) ->
+ Acc;
+ ({Tag, Pid}, #consumer{meta = Meta} = Consumer, Acc) ->
+ {Active, ActivityStatus} =
+ ActiveActivityStatusFun({Tag, Pid}, Consumer),
+ maps:put({Tag, Pid},
+ {Pid, Tag,
+ maps:get(ack, Meta, undefined),
+ maps:get(prefetch, Meta, undefined),
+ Active,
+ ActivityStatus,
+ maps:get(args, Meta, []),
+ maps:get(username, Meta, undefined)},
+ Acc)
+ end, #{}, Consumers),
+ FromWaitingConsumers =
+ lists:foldl(fun ({_, #consumer{status = cancelled}}, Acc) ->
+ Acc;
+ ({{Tag, Pid}, #consumer{meta = Meta} = Consumer}, Acc) ->
+ {Active, ActivityStatus} =
+ ActiveActivityStatusFun({Tag, Pid}, Consumer),
+ maps:put({Tag, Pid},
+ {Pid, Tag,
+ maps:get(ack, Meta, undefined),
+ maps:get(prefetch, Meta, undefined),
+ Active,
+ ActivityStatus,
+ maps:get(args, Meta, []),
+ maps:get(username, Meta, undefined)},
+ Acc)
+ end, #{}, WaitingConsumers),
+ maps:merge(FromConsumers, FromWaitingConsumers).
+
+query_single_active_consumer(#?STATE{cfg = #cfg{consumer_strategy = single_active},
+ consumers = Consumers}) ->
+ case maps:size(Consumers) of
+ 0 ->
+ {error, no_value};
+ 1 ->
+ {value, lists:nth(1, maps:keys(Consumers))};
+ _ ->
+ {error, illegal_size}
+ end;
+query_single_active_consumer(_) ->
+ disabled.
+
+query_stat(#?STATE{consumers = Consumers} = State) ->
+ {messages_ready(State), maps:size(Consumers)}.
+
+query_in_memory_usage(#?STATE{msg_bytes_in_memory = Bytes,
+ msgs_ready_in_memory = Length}) ->
+ {Length, Bytes}.
+
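+%% Returns the last utilisation sample written for this queue by the
+%% `tick' aux command in handle_aux/6, or 0.0 if none has been recorded.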
+-spec usage(atom()) -> float().
+usage(Name) when is_atom(Name) ->
+ case ets:lookup(rabbit_fifo_usage, Name) of
+ [] -> 0.0;
+ [{_, Use}] -> Use
+ end.
+
+%%% Internal
+
+messages_ready(#?STATE{messages = M,
+ prefix_msgs = {RCnt, _R, PCnt, _P},
+ returns = R}) ->
+
+ %% prefix message counts are tracked in the state so no list
+ %% traversal is needed here
+ maps:size(M) + lqueue:len(R) + RCnt + PCnt.
+
+messages_total(#?STATE{ra_indexes = I,
+ prefix_msgs = {RCnt, _R, PCnt, _P}}) ->
+ rabbit_fifo_index:size(I) + RCnt + PCnt.
+
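+%% Consumer utilisation tracking. The term is either {active, Since, Avg}
+%% or {inactive, Since, ActiveDuration, Avg}: Since is a monotonic
+%% timestamp, ActiveDuration the time spent active before going inactive,
+%% and Avg the running average maintained by use_avg/3.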
+update_use({inactive, _, _, _} = CUInfo, inactive) ->
+ CUInfo;
+update_use({active, _, _} = CUInfo, active) ->
+ CUInfo;
+update_use({active, Since, Avg}, inactive) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {inactive, Now, Now - Since, Avg};
+update_use({inactive, Since, Active, Avg}, active) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {active, Now, use_avg(Active, Now - Since, Avg)}.
+
+utilisation({active, Since, Avg}) ->
+ use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg);
+utilisation({inactive, Since, Active, Avg}) ->
+ use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg).
+
+use_avg(0, 0, Avg) ->
+ Avg;
+use_avg(Active, Inactive, Avg) ->
+ Time = Inactive + Active,
+ moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg).
+
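+%% Exponentially weighted moving average: the previous value decays with
+%% half-life ?USE_AVG_HALF_LIFE, so a sample arriving exactly one
+%% half-life after the last one weighs old and new equally (Weight = 0.5).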
+moving_average(_Time, _, Next, undefined) ->
+ Next;
+moving_average(Time, HalfLife, Next, Current) ->
+ Weight = math:exp(Time * math:log(0.5) / HalfLife),
+ Next * (1 - Weight) + Current * Weight.
+
+num_checked_out(#?STATE{consumers = Cons}) ->
+ maps:fold(fun (_, #consumer{checked_out = C}, Acc) ->
+ maps:size(C) + Acc
+ end, 0, Cons).
+
+cancel_consumer(ConsumerId,
+ #?STATE{cfg = #cfg{consumer_strategy = competing}} = State,
+ Effects, Reason) ->
+ cancel_consumer0(ConsumerId, State, Effects, Reason);
+cancel_consumer(ConsumerId,
+ #?STATE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = []} = State,
+ Effects, Reason) ->
+ %% single active consumer on, no consumers are waiting
+ cancel_consumer0(ConsumerId, State, Effects, Reason);
+cancel_consumer(ConsumerId,
+ #?STATE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = Waiting0} = State0,
+ Effects0, Reason) ->
+ %% single active consumer on, consumers are waiting
+ case maps:is_key(ConsumerId, Cons0) of
+ true ->
+ % The active consumer is to be removed
+ {State1, Effects1} = cancel_consumer0(ConsumerId, State0,
+ Effects0, Reason),
+ activate_next_consumer(State1, Effects1);
+ false ->
+ % The consumer being cancelled is not the active one
+ % Just remove it from the waiting consumers list
+ Waiting = lists:keydelete(ConsumerId, 1, Waiting0),
+ Effects = cancel_consumer_effects(ConsumerId, State0, Effects0),
+ % A waiting consumer isn't supposed to have any checked out messages,
+ % so nothing special to do here
+ {State0#?STATE{waiting_consumers = Waiting}, Effects}
+ end.
+
+consumer_update_active_effects(#?STATE{cfg = #cfg{resource = QName}},
+ ConsumerId, #consumer{meta = Meta},
+ Active, ActivityStatus,
+ Effects) ->
+ Ack = maps:get(ack, Meta, undefined),
+ Prefetch = maps:get(prefetch, Meta, undefined),
+ Args = maps:get(args, Meta, []),
+ [{mod_call, rabbit_quorum_queue, update_consumer_handler,
+ [QName, ConsumerId, false, Ack, Prefetch, Active, ActivityStatus, Args]}
+ | Effects].
+
+cancel_consumer0(ConsumerId, #?STATE{consumers = C0} = S0, Effects0, Reason) ->
+ case C0 of
+ #{ConsumerId := Consumer} ->
+ {S, Effects2} = maybe_return_all(ConsumerId, Consumer, S0,
+ Effects0, Reason),
+ %% The effects are emitted before the consumer is actually removed
+ %% if the consumer has unacked messages. This is a bit weird but
+ %% in line with what classic queues do (from an external point of
+ %% view)
+ Effects = cancel_consumer_effects(ConsumerId, S, Effects2),
+ case maps:size(S#?STATE.consumers) of
+ 0 ->
+ {S, [{aux, inactive} | Effects]};
+ _ ->
+ {S, Effects}
+ end;
+ _ ->
+ %% already removed: do nothing
+ {S0, Effects0}
+ end.
+
+activate_next_consumer(#?STATE{consumers = Cons,
+ waiting_consumers = Waiting0} = State0,
+ Effects0) ->
+ case maps:filter(fun (_, #consumer{status = S}) -> S == up end, Cons) of
+ Up when map_size(Up) == 0 ->
+ %% there are no active consumers in the consumers map
+ case lists:filter(fun ({_, #consumer{status = Status}}) ->
+ Status == up
+ end, Waiting0) of
+ [{NextConsumerId, NextConsumer} | _] ->
+ %% there is a potential next active consumer
+ Remaining = lists:keydelete(NextConsumerId, 1, Waiting0),
+ #?STATE{service_queue = ServiceQueue} = State0,
+ ServiceQueue1 = maybe_queue_consumer(NextConsumerId,
+ NextConsumer,
+ ServiceQueue),
+ State = State0#?STATE{consumers = Cons#{NextConsumerId => NextConsumer},
+ service_queue = ServiceQueue1,
+ waiting_consumers = Remaining},
+ Effects = consumer_update_active_effects(State, NextConsumerId,
+ NextConsumer, true,
+ single_active, Effects0),
+ {State, Effects};
+ [] ->
+ {State0, [{aux, inactive} | Effects0]}
+ end;
+ _ ->
+ {State0, Effects0}
+ end.
+
+
+
+maybe_return_all(ConsumerId, Consumer,
+ #?STATE{consumers = C0,
+ service_queue = SQ0} = S0,
+ Effects0, Reason) ->
+ case Reason of
+ consumer_cancel ->
+ {Cons, SQ, Effects1} =
+ update_or_remove_sub(ConsumerId,
+ Consumer#consumer{lifetime = once,
+ credit = 0,
+ status = cancelled},
+ C0, SQ0, Effects0),
+ {S0#?STATE{consumers = Cons,
+ service_queue = SQ}, Effects1};
+ down ->
+ {S1, Effects1} = return_all(S0, Effects0, ConsumerId, Consumer),
+ {S1#?STATE{consumers = maps:remove(ConsumerId, S1#?STATE.consumers)},
+ Effects1}
+ end.
+
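+%% Enqueue path: maybe_enqueue/6 de-duplicates using the per-enqueuer
+%% sequence number, the raft index is then recorded, a checkout is run
+%% and a dehydrated copy of the state may be stashed for future snapshot
+%% generation.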
+apply_enqueue(#{index := RaftIdx} = Meta, From, Seq, RawMsg, State0) ->
+ case maybe_enqueue(RaftIdx, From, Seq, RawMsg, [], State0) of
+ {ok, State1, Effects1} ->
+ State2 = append_to_master_index(RaftIdx, State1),
+ {State, ok, Effects} = checkout(Meta, State2, Effects1),
+ {maybe_store_dehydrated_state(RaftIdx, State), ok, Effects};
+ {duplicate, State, Effects} ->
+ {State, ok, Effects}
+ end.
+
+drop_head(#?STATE{ra_indexes = Indexes0} = State0, Effects0) ->
+ case take_next_msg(State0) of
+ {FullMsg = {_MsgId, {RaftIdxToDrop, {Header, Msg}}},
+ State1} ->
+ Indexes = rabbit_fifo_index:delete(RaftIdxToDrop, Indexes0),
+ State2 = add_bytes_drop(Header, State1#?STATE{ra_indexes = Indexes}),
+ State = case Msg of
+ 'empty' -> State2;
+ _ -> subtract_in_memory_counts(Header, State2)
+ end,
+ Effects = dead_letter_effects(maxlen, #{none => FullMsg},
+ State, Effects0),
+ {State, Effects};
+ {{'$prefix_msg', Header}, State1} ->
+ State2 = subtract_in_memory_counts(Header, add_bytes_drop(Header, State1)),
+ {State2, Effects0};
+ {{'$empty_msg', Header}, State1} ->
+ State2 = add_bytes_drop(Header, State1),
+ {State2, Effects0};
+ empty ->
+ {State0, Effects0}
+ end.
+
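+%% Stores a message under the next message number. Depending on the
+%% in-memory limits the raw payload is either kept in memory or replaced
+%% by the 'empty' placeholder and read back from the raft log on
+%% delivery (see send_log_effect/2).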
+enqueue(RaftIdx, RawMsg, #?STATE{messages = Messages,
+ low_msg_num = LowMsgNum,
+ next_msg_num = NextMsgNum} = State0) ->
+ %% the initial header is an integer only - it will get expanded to a map
+ %% when the next required key is added
+ Header = message_size(RawMsg),
+ {State1, Msg} =
+ case evaluate_memory_limit(Header, State0) of
+ true ->
+ % indexed message with header map
+ {State0, {RaftIdx, {Header, 'empty'}}};
+ false ->
+ {add_in_memory_counts(Header, State0),
+ {RaftIdx, {Header, RawMsg}}} % indexed message with header map
+ end,
+ State = add_bytes_enqueue(Header, State1),
+ State#?STATE{messages = Messages#{NextMsgNum => Msg},
+ %% this is probably only done to record it when low_msg_num
+ %% is undefined
+ low_msg_num = min(LowMsgNum, NextMsgNum),
+ next_msg_num = NextMsgNum + 1}.
+
+append_to_master_index(RaftIdx,
+ #?STATE{ra_indexes = Indexes0} = State0) ->
+ State = incr_enqueue_count(State0),
+ Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0),
+ State#?STATE{ra_indexes = Indexes}.
+
+
+incr_enqueue_count(#?STATE{enqueue_count = C,
+ cfg = #cfg{release_cursor_interval = {_Base, C}}
+ } = State0) ->
+ %% this will trigger a dehydrated version of the state to be stored
+ %% at this raft index for potential future snapshot generation
+ %% Q: Why don't we just stash the release cursor here?
+ %% A: Because it needs to be the very last thing we do and we
+ %% first need to run the checkout logic.
+ State0#?STATE{enqueue_count = 0};
+incr_enqueue_count(#?STATE{cfg = #cfg{release_cursor_interval = C} = Cfg}
+ = State0)
+ when is_integer(C) ->
+ %% conversion to new release cursor interval format
+ State = State0#?STATE{cfg = Cfg#cfg{release_cursor_interval = {C, C}}},
+ incr_enqueue_count(State);
+incr_enqueue_count(#?STATE{enqueue_count = C} = State) ->
+ State#?STATE{enqueue_count = C + 1}.
+
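+%% Whenever the enqueue counter has just been reset (see
+%% incr_enqueue_count/1) a dehydrated copy of the state is queued as a
+%% potential release cursor at this raft index for later snapshot
+%% generation.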
+maybe_store_dehydrated_state(RaftIdx,
+ #?STATE{cfg =
+ #cfg{release_cursor_interval = {Base, _}}
+ = Cfg,
+ ra_indexes = Indexes,
+ enqueue_count = 0,
+ release_cursors = Cursors0} = State0) ->
+ case rabbit_fifo_index:exists(RaftIdx, Indexes) of
+ false ->
+ %% the incoming enqueue must already have been dropped
+ State0;
+ true ->
+ Interval = case Base of
+ 0 -> 0;
+ _ ->
+ Total = messages_total(State0),
+ min(max(Total, Base),
+ ?RELEASE_CURSOR_EVERY_MAX)
+ end,
+ State = convert_prefix_msgs(
+ State0#?STATE{cfg = Cfg#cfg{release_cursor_interval =
+ {Base, Interval}}}),
+ Dehydrated = dehydrate_state(State),
+ Cursor = {release_cursor, RaftIdx, Dehydrated},
+ Cursors = lqueue:in(Cursor, Cursors0),
+ State#?STATE{release_cursors = Cursors}
+ end;
+maybe_store_dehydrated_state(RaftIdx,
+ #?STATE{cfg =
+ #cfg{release_cursor_interval = C} = Cfg}
+ = State0)
+ when is_integer(C) ->
+ %% convert to new format
+ State = State0#?STATE{cfg = Cfg#cfg{release_cursor_interval = {C, C}}},
+ maybe_store_dehydrated_state(RaftIdx, State);
+maybe_store_dehydrated_state(_RaftIdx, State) ->
+ State.
+
+enqueue_pending(From,
+ #enqueuer{next_seqno = Next,
+ pending = [{Next, RaftIdx, RawMsg} | Pending]} = Enq0,
+ State0) ->
+ State = enqueue(RaftIdx, RawMsg, State0),
+ Enq = Enq0#enqueuer{next_seqno = Next + 1, pending = Pending},
+ enqueue_pending(From, Enq, State);
+enqueue_pending(From, Enq, #?STATE{enqueuers = Enqueuers0} = State) ->
+ State#?STATE{enqueuers = Enqueuers0#{From => Enq}}.
+
+maybe_enqueue(RaftIdx, undefined, undefined, RawMsg, Effects, State0) ->
+ % direct enqueue without tracking
+ State = enqueue(RaftIdx, RawMsg, State0),
+ {ok, State, Effects};
+maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0,
+ #?STATE{enqueuers = Enqueuers0} = State0) ->
+ case maps:get(From, Enqueuers0, undefined) of
+ undefined ->
+ State1 = State0#?STATE{enqueuers = Enqueuers0#{From => #enqueuer{}}},
+ {ok, State, Effects} = maybe_enqueue(RaftIdx, From, MsgSeqNo,
+ RawMsg, Effects0, State1),
+ {ok, State, [{monitor, process, From} | Effects]};
+ #enqueuer{next_seqno = MsgSeqNo} = Enq0 ->
+ % it is the next expected seqno
+ State1 = enqueue(RaftIdx, RawMsg, State0),
+ Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1},
+ State = enqueue_pending(From, Enq, State1),
+ {ok, State, Effects0};
+ #enqueuer{next_seqno = Next,
+ pending = Pending0} = Enq0
+ when MsgSeqNo > Next ->
+ % out of order delivery
+ Pending = [{MsgSeqNo, RaftIdx, RawMsg} | Pending0],
+ Enq = Enq0#enqueuer{pending = lists:sort(Pending)},
+ {ok, State0#?STATE{enqueuers = Enqueuers0#{From => Enq}}, Effects0};
+ #enqueuer{next_seqno = Next} when MsgSeqNo =< Next ->
+ % duplicate delivery - remove the raft index from the ra_indexes
+ % map as it was added earlier
+ {duplicate, State0, Effects0}
+ end.
+
+snd(T) ->
+ element(2, T).
+
+return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned,
+ Effects0, #?STATE{service_queue = SQ0} = State0) ->
+ {State1, Effects1} = maps:fold(
+ fun(MsgId, {Tag, _} = Msg, {S0, E0})
+ when Tag == '$prefix_msg';
+ Tag == '$empty_msg'->
+ return_one(MsgId, 0, Msg, S0, E0, ConsumerId);
+ (MsgId, {MsgNum, Msg}, {S0, E0}) ->
+ return_one(MsgId, MsgNum, Msg, S0, E0,
+ ConsumerId)
+ end, {State0, Effects0}, Returned),
+ {State2, Effects3} =
+ case State1#?STATE.consumers of
+ #{ConsumerId := Con0} = Cons0 ->
+ Con = Con0#consumer{credit = increase_credit(Con0,
+ map_size(Returned))},
+ {Cons, SQ, Effects2} = update_or_remove_sub(ConsumerId, Con,
+ Cons0, SQ0, Effects1),
+ {State1#?STATE{consumers = Cons,
+ service_queue = SQ}, Effects2};
+ _ ->
+ {State1, Effects1}
+ end,
+ {State, ok, Effects} = checkout(Meta, State2, Effects3),
+ update_smallest_raft_index(IncomingRaftIdx, State, Effects).
+
+% used to process messages that are finished
+complete(ConsumerId, Discarded,
+ #consumer{checked_out = Checked} = Con0, Effects0,
+ #?STATE{consumers = Cons0, service_queue = SQ0,
+ ra_indexes = Indexes0} = State0) ->
+ %% TODO optimise use of Discarded map here
+ MsgRaftIdxs = [RIdx || {_, {RIdx, _}} <- maps:values(Discarded)],
+ %% credit_mode = simple_prefetch should automatically top up credit
+ %% as messages are settled or otherwise returned
+ Con = Con0#consumer{checked_out = maps:without(maps:keys(Discarded), Checked),
+ credit = increase_credit(Con0, map_size(Discarded))},
+ {Cons, SQ, Effects} = update_or_remove_sub(ConsumerId, Con, Cons0,
+ SQ0, Effects0),
+ Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0,
+ MsgRaftIdxs),
+ %% TODO: use maps:fold instead
+ State1 = lists:foldl(fun({_, {_, {Header, _}}}, Acc) ->
+ add_bytes_settle(Header, Acc);
+ ({'$prefix_msg', Header}, Acc) ->
+ add_bytes_settle(Header, Acc);
+ ({'$empty_msg', Header}, Acc) ->
+ add_bytes_settle(Header, Acc)
+ end, State0, maps:values(Discarded)),
+ {State1#?STATE{consumers = Cons,
+ ra_indexes = Indexes,
+ service_queue = SQ}, Effects}.
+
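+%% Credit replenishment on settle/return: only auto consumers using
+%% simple_prefetch have their credit topped up; once consumers and
+%% credited consumers keep their current credit.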
+increase_credit(#consumer{lifetime = once,
+ credit = Credit}, _) ->
+ %% once consumers cannot increment credit
+ Credit;
+increase_credit(#consumer{lifetime = auto,
+ credit_mode = credited,
+ credit = Credit}, _) ->
+ %% credit_mode: credited also doesn't automatically increment credit
+ Credit;
+increase_credit(#consumer{credit = Current}, Credit) ->
+ Current + Credit.
+
+complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId,
+ #consumer{checked_out = Checked0} = Con0,
+ Effects0, State0) ->
+ Discarded = maps:with(MsgIds, Checked0),
+ {State2, Effects1} = complete(ConsumerId, Discarded, Con0,
+ Effects0, State0),
+ {State, ok, Effects} = checkout(Meta, State2, Effects1),
+ update_smallest_raft_index(IncomingRaftIdx, State, Effects).
+
+dead_letter_effects(_Reason, _Discarded,
+ #?STATE{cfg = #cfg{dead_letter_handler = undefined}},
+ Effects) ->
+ Effects;
+dead_letter_effects(Reason, Discarded,
+ #?STATE{cfg = #cfg{dead_letter_handler = {Mod, Fun, Args}}},
+ Effects) ->
+ RaftIdxs = maps:fold(
+ fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) ->
+ [RaftIdx | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Discarded),
+ [{log, RaftIdxs,
+ fun (Log) ->
+ Lookup = maps:from_list(lists:zip(RaftIdxs, Log)),
+ DeadLetters = maps:fold(
+ fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) ->
+ {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup),
+ [{Reason, Msg} | Acc];
+ (_, {_, {_, {_Header, Msg}}}, Acc) ->
+ [{Reason, Msg} | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Discarded),
+ [{mod_call, Mod, Fun, Args ++ [DeadLetters]}]
+ end} | Effects].
+
+cancel_consumer_effects(ConsumerId,
+ #?STATE{cfg = #cfg{resource = QName}}, Effects) ->
+ [{mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [QName, ConsumerId]} | Effects].
+
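+%% Works out whether a release cursor can be emitted: with no raft
+%% indexes left the cursor advances all the way to the incoming index,
+%% otherwise the newest stashed cursor below the smallest tracked index
+%% is emitted (see find_next_cursor/3).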
+update_smallest_raft_index(IncomingRaftIdx,
+ #?STATE{ra_indexes = Indexes,
+ release_cursors = Cursors0} = State0,
+ Effects) ->
+ case rabbit_fifo_index:size(Indexes) of
+ 0 ->
+ % there are no messages on queue anymore and no pending enqueues
+ % we can forward release_cursor all the way until
+ % the last received command, hooray
+ State = State0#?STATE{release_cursors = lqueue:new()},
+ {State, ok, Effects ++ [{release_cursor, IncomingRaftIdx, State}]};
+ _ ->
+ Smallest = rabbit_fifo_index:smallest(Indexes),
+ case find_next_cursor(Smallest, Cursors0) of
+ {empty, Cursors} ->
+ {State0#?STATE{release_cursors = Cursors},
+ ok, Effects};
+ {Cursor, Cursors} ->
+ %% we can emit a release cursor as we've passed the smallest
+ %% release cursor available.
+ {State0#?STATE{release_cursors = Cursors}, ok,
+ Effects ++ [Cursor]}
+ end
+ end.
+
+find_next_cursor(Idx, Cursors) ->
+ find_next_cursor(Idx, Cursors, empty).
+
+find_next_cursor(Smallest, Cursors0, Potential) ->
+ case lqueue:out(Cursors0) of
+ {{value, {_, Idx, _} = Cursor}, Cursors} when Idx < Smallest ->
+ %% we found one but it may not be the largest one
+ find_next_cursor(Smallest, Cursors, Cursor);
+ _ ->
+ {Potential, Cursors0}
+ end.
+
+update_header(Key, UpdateFun, Default, Header)
+ when is_integer(Header) ->
+ update_header(Key, UpdateFun, Default, #{size => Header});
+update_header(Key, UpdateFun, Default, Header) ->
+ maps:update_with(Key, UpdateFun, Default, Header).
+
+
+return_one(MsgId, 0, {Tag, Header0},
+ #?STATE{returns = Returns,
+ consumers = Consumers,
+ cfg = #cfg{delivery_limit = DeliveryLimit}} = State0,
+ Effects0, ConsumerId)
+ when Tag == '$prefix_msg'; Tag == '$empty_msg' ->
+ #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers),
+ Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0),
+ Msg0 = {Tag, Header},
+ case maps:get(delivery_count, Header) of
+ DeliveryCount when DeliveryCount > DeliveryLimit ->
+ complete(ConsumerId, #{MsgId => Msg0}, Con0, Effects0, State0);
+ _ ->
+ %% this should not affect the release cursor in any way
+ Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)},
+ {Msg, State1} = case Tag of
+ '$empty_msg' ->
+ {Msg0, State0};
+ _ -> case evaluate_memory_limit(Header, State0) of
+ true ->
+ {{'$empty_msg', Header}, State0};
+ false ->
+ {Msg0, add_in_memory_counts(Header, State0)}
+ end
+ end,
+ {add_bytes_return(
+ Header,
+ State1#?STATE{consumers = Consumers#{ConsumerId => Con},
+ returns = lqueue:in(Msg, Returns)}),
+ Effects0}
+ end;
+return_one(MsgId, MsgNum, {RaftId, {Header0, RawMsg}},
+ #?STATE{returns = Returns,
+ consumers = Consumers,
+ cfg = #cfg{delivery_limit = DeliveryLimit}} = State0,
+ Effects0, ConsumerId) ->
+ #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers),
+ Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0),
+ Msg0 = {RaftId, {Header, RawMsg}},
+ case maps:get(delivery_count, Header) of
+ DeliveryCount when DeliveryCount > DeliveryLimit ->
+ DlMsg = {MsgNum, Msg0},
+ Effects = dead_letter_effects(delivery_limit, #{none => DlMsg},
+ State0, Effects0),
+ complete(ConsumerId, #{MsgId => DlMsg}, Con0, Effects, State0);
+ _ ->
+ Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)},
+ %% this should not affect the release cursor in any way
+ {Msg, State1} = case RawMsg of
+ 'empty' ->
+ {Msg0, State0};
+ _ ->
+ case evaluate_memory_limit(Header, State0) of
+ true ->
+ {{RaftId, {Header, 'empty'}}, State0};
+ false ->
+ {Msg0, add_in_memory_counts(Header, State0)}
+ end
+ end,
+ {add_bytes_return(
+ Header,
+ State1#?STATE{consumers = Consumers#{ConsumerId => Con},
+ returns = lqueue:in({MsgNum, Msg}, Returns)}),
+ Effects0}
+ end.
+
+return_all(#?STATE{consumers = Cons} = State0, Effects0, ConsumerId,
+ #consumer{checked_out = Checked0} = Con) ->
+ %% need to sort the list so that we return messages in the order
+ %% they were checked out
+ Checked = lists:sort(maps:to_list(Checked0)),
+ State = State0#?STATE{consumers = Cons#{ConsumerId => Con}},
+ lists:foldl(fun ({MsgId, {'$prefix_msg', _} = Msg}, {S, E}) ->
+ return_one(MsgId, 0, Msg, S, E, ConsumerId);
+ ({MsgId, {'$empty_msg', _} = Msg}, {S, E}) ->
+ return_one(MsgId, 0, Msg, S, E, ConsumerId);
+ ({MsgId, {MsgNum, Msg}}, {S, E}) ->
+ return_one(MsgId, MsgNum, Msg, S, E, ConsumerId)
+ end, {State, Effects0}, Checked).
+
+%% checkout new messages to consumers
+checkout(#{index := Index}, State0, Effects0) ->
+ {State1, _Result, Effects1} = checkout0(checkout_one(State0),
+ Effects0, {#{}, #{}}),
+ case evaluate_limit(false, State1, Effects1) of
+ {State, true, Effects} ->
+ update_smallest_raft_index(Index, State, Effects);
+ {State, false, Effects} ->
+ {State, ok, Effects}
+ end.
+
+checkout0({success, ConsumerId, MsgId, {RaftIdx, {Header, 'empty'}}, State},
+ Effects, {SendAcc, LogAcc0}) ->
+ DelMsg = {RaftIdx, {MsgId, Header}},
+ LogAcc = maps:update_with(ConsumerId,
+ fun (M) -> [DelMsg | M] end,
+ [DelMsg], LogAcc0),
+ checkout0(checkout_one(State), Effects, {SendAcc, LogAcc});
+checkout0({success, ConsumerId, MsgId, Msg, State}, Effects,
+ {SendAcc0, LogAcc}) ->
+ DelMsg = {MsgId, Msg},
+ SendAcc = maps:update_with(ConsumerId,
+ fun (M) -> [DelMsg | M] end,
+ [DelMsg], SendAcc0),
+ checkout0(checkout_one(State), Effects, {SendAcc, LogAcc});
+checkout0({Activity, State0}, Effects0, {SendAcc, LogAcc}) ->
+ Effects1 = case Activity of
+ nochange ->
+ append_send_msg_effects(
+ append_log_effects(Effects0, LogAcc), SendAcc);
+ inactive ->
+ [{aux, inactive}
+ | append_send_msg_effects(
+ append_log_effects(Effects0, LogAcc), SendAcc)]
+ end,
+ {State0, ok, lists:reverse(Effects1)}.
+
+evaluate_limit(Result,
+ #?STATE{cfg = #cfg{max_length = undefined,
+ max_bytes = undefined}} = State,
+ Effects) ->
+ {State, Result, Effects};
+evaluate_limit(Result, State00, Effects0) ->
+ State0 = convert_prefix_msgs(State00),
+ case is_over_limit(State0) of
+ true ->
+ {State, Effects} = drop_head(State0, Effects0),
+ evaluate_limit(true, State, Effects);
+ false ->
+ {State0, Result, Effects0}
+ end.
+
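+%% Returns true when holding another message of the given size in memory
+%% would exceed the configured max_in_memory_length or max_in_memory_bytes
+%% limits, in which case only the 'empty' placeholder is kept in memory.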
+evaluate_memory_limit(_Header,
+ #?STATE{cfg = #cfg{max_in_memory_length = undefined,
+ max_in_memory_bytes = undefined}}) ->
+ false;
+evaluate_memory_limit(#{size := Size}, State) ->
+ evaluate_memory_limit(Size, State);
+evaluate_memory_limit(Size,
+ #?STATE{cfg = #cfg{max_in_memory_length = MaxLength,
+ max_in_memory_bytes = MaxBytes},
+ msg_bytes_in_memory = Bytes,
+ msgs_ready_in_memory = Length})
+ when is_integer(Size) ->
+ (Length >= MaxLength) orelse ((Bytes + Size) > MaxBytes).
+
+append_send_msg_effects(Effects, AccMap) when map_size(AccMap) == 0 ->
+ Effects;
+append_send_msg_effects(Effects0, AccMap) ->
+ Effects = maps:fold(fun (C, Msgs, Ef) ->
+ [send_msg_effect(C, lists:reverse(Msgs)) | Ef]
+ end, Effects0, AccMap),
+ [{aux, active} | Effects].
+
+append_log_effects(Effects0, AccMap) ->
+ maps:fold(fun (C, Msgs, Ef) ->
+ [send_log_effect(C, lists:reverse(Msgs)) | Ef]
+ end, Effects0, AccMap).
+
+%% next message is determined as follows:
+%% First we check if there are prefix returns
+%% Then we check if there are current returns
+%% then we check prefix msgs
+%% then we check current messages
+%%
+%% When we return it is always done to the current return queue
+%% for both prefix messages and current messages
+take_next_msg(#?STATE{prefix_msgs = {R, P}} = State) ->
+ %% conversion
+ take_next_msg(State#?STATE{prefix_msgs = {length(R), R, length(P), P}});
+take_next_msg(#?STATE{prefix_msgs = {NumR, [{'$empty_msg', _} = Msg | Rem],
+ NumP, P}} = State) ->
+ %% there are prefix returns, these should be served first
+ {Msg, State#?STATE{prefix_msgs = {NumR-1, Rem, NumP, P}}};
+take_next_msg(#?STATE{prefix_msgs = {NumR, [Header | Rem], NumP, P}} = State) ->
+ %% there are prefix returns, these should be served first
+ {{'$prefix_msg', Header},
+ State#?STATE{prefix_msgs = {NumR-1, Rem, NumP, P}}};
+take_next_msg(#?STATE{returns = Returns,
+ low_msg_num = Low0,
+ messages = Messages0,
+ prefix_msgs = {NumR, R, NumP, P}} = State) ->
+ %% use peek rather than out here as the most likely case is an empty
+ %% queue
+ case lqueue:peek(Returns) of
+ {value, NextMsg} ->
+ {NextMsg,
+ State#?STATE{returns = lqueue:drop(Returns)}};
+ empty when P == [] ->
+ case Low0 of
+ undefined ->
+ empty;
+ _ ->
+ {Msg, Messages} = maps:take(Low0, Messages0),
+ case maps:size(Messages) of
+ 0 ->
+ {{Low0, Msg},
+ State#?STATE{messages = Messages,
+ low_msg_num = undefined}};
+ _ ->
+ {{Low0, Msg},
+ State#?STATE{messages = Messages,
+ low_msg_num = Low0 + 1}}
+ end
+ end;
+ empty ->
+ [Msg | Rem] = P,
+ case Msg of
+ {Header, 'empty'} ->
+ %% There are prefix msgs
+ {{'$empty_msg', Header},
+ State#?STATE{prefix_msgs = {NumR, R, NumP-1, Rem}}};
+ Header ->
+ {{'$prefix_msg', Header},
+ State#?STATE{prefix_msgs = {NumR, R, NumP-1, Rem}}}
+ end
+ end.
+
+send_msg_effect({CTag, CPid}, Msgs) ->
+ {send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}.
+
+send_log_effect({CTag, CPid}, IdxMsgs) ->
+ {RaftIdxs, Data} = lists:unzip(IdxMsgs),
+ {log, RaftIdxs,
+ fun(Log) ->
+ Msgs = lists:zipwith(fun ({enqueue, _, _, Msg}, {MsgId, Header}) ->
+ {MsgId, {Header, Msg}}
+ end, Log, Data),
+ [{send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}]
+ end,
+ {local, node(CPid)}}.
+
+reply_log_effect(RaftIdx, MsgId, Header, Ready, From) ->
+ {log, [RaftIdx],
+ fun([{enqueue, _, _, Msg}]) ->
+ [{reply, From, {wrap_reply,
+ {dequeue, {MsgId, {Header, Msg}}, Ready}}}]
+ end}.
+
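+%% Attempts a single delivery: takes the consumer at the head of the
+%% service queue and the next available message. Consumers without
+%% credit, cancelled or suspected_down ones are skipped. Returns
+%% {success, ConsumerId, MsgId, Msg, State} or {nochange | inactive, State}.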
+checkout_one(#?STATE{service_queue = SQ0,
+ messages = Messages0,
+ consumers = Cons0} = InitState) ->
+ case queue:peek(SQ0) of
+ {value, ConsumerId} ->
+ case take_next_msg(InitState) of
+ {ConsumerMsg, State0} ->
+ SQ1 = queue:drop(SQ0),
+ %% there are consumers waiting to be serviced
+ %% process consumer checkout
+ case maps:find(ConsumerId, Cons0) of
+ {ok, #consumer{credit = 0}} ->
+ %% no credit but was still on queue
+ %% can happen when draining
+ %% recurse without consumer on queue
+ checkout_one(InitState#?STATE{service_queue = SQ1});
+ {ok, #consumer{status = cancelled}} ->
+ checkout_one(InitState#?STATE{service_queue = SQ1});
+ {ok, #consumer{status = suspected_down}} ->
+ checkout_one(InitState#?STATE{service_queue = SQ1});
+ {ok, #consumer{checked_out = Checked0,
+ next_msg_id = Next,
+ credit = Credit,
+ delivery_count = DelCnt} = Con0} ->
+ Checked = maps:put(Next, ConsumerMsg, Checked0),
+ Con = Con0#consumer{checked_out = Checked,
+ next_msg_id = Next + 1,
+ credit = Credit - 1,
+ delivery_count = DelCnt + 1},
+ {Cons, SQ, []} = % we expect no effects
+ update_or_remove_sub(ConsumerId, Con,
+ Cons0, SQ1, []),
+ State1 = State0#?STATE{service_queue = SQ,
+ consumers = Cons},
+ {State, Msg} =
+ case ConsumerMsg of
+ {'$prefix_msg', Header} ->
+ {subtract_in_memory_counts(
+ Header, add_bytes_checkout(Header, State1)),
+ ConsumerMsg};
+ {'$empty_msg', Header} ->
+ {add_bytes_checkout(Header, State1),
+ ConsumerMsg};
+ {_, {_, {Header, 'empty'}} = M} ->
+ {add_bytes_checkout(Header, State1),
+ M};
+ {_, {_, {Header, _} = M}} ->
+ {subtract_in_memory_counts(
+ Header,
+ add_bytes_checkout(Header, State1)),
+ M}
+ end,
+ {success, ConsumerId, Next, Msg, State};
+ error ->
+ %% consumer did not exist but was queued, recurse
+ checkout_one(InitState#?STATE{service_queue = SQ1})
+ end;
+ empty ->
+ {nochange, InitState}
+ end;
+ empty ->
+ case maps:size(Messages0) of
+ 0 -> {nochange, InitState};
+ _ -> {inactive, InitState}
+ end
+ end.
+
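+%% Keeps the consumers map and service queue consistent after a delivery
+%% or credit change: consumers that still have credit are (re)queued for
+%% service; once consumers with no credit and nothing checked out are
+%% removed entirely.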
+update_or_remove_sub(ConsumerId, #consumer{lifetime = auto,
+ credit = 0} = Con,
+ Cons, ServiceQueue, Effects) ->
+ {maps:put(ConsumerId, Con, Cons), ServiceQueue, Effects};
+update_or_remove_sub(ConsumerId, #consumer{lifetime = auto} = Con,
+ Cons, ServiceQueue, Effects) ->
+ {maps:put(ConsumerId, Con, Cons),
+ uniq_queue_in(ConsumerId, ServiceQueue), Effects};
+update_or_remove_sub(ConsumerId, #consumer{lifetime = once,
+ checked_out = Checked,
+ credit = 0} = Con,
+ Cons, ServiceQueue, Effects) ->
+ case maps:size(Checked) of
+ 0 ->
+ % we're done with this consumer
+ % TODO: demonitor consumer pid but _only_ if there are no other
+ % monitors for this pid
+ {maps:remove(ConsumerId, Cons), ServiceQueue, Effects};
+ _ ->
+ % there are unsettled items so need to keep around
+ {maps:put(ConsumerId, Con, Cons), ServiceQueue, Effects}
+ end;
+update_or_remove_sub(ConsumerId, #consumer{lifetime = once} = Con,
+ Cons, ServiceQueue, Effects) ->
+ {maps:put(ConsumerId, Con, Cons),
+ uniq_queue_in(ConsumerId, ServiceQueue), Effects}.
+
+uniq_queue_in(Key, Queue) ->
+ % TODO: queue:member could surely be quite expensive, however the practical
+ % number of unique consumers may not be large enough for it to matter
+ case queue:member(Key, Queue) of
+ true ->
+ Queue;
+ false ->
+ queue:in(Key, Queue)
+ end.
+
+update_consumer(ConsumerId, Meta, Spec,
+ #?STATE{cfg = #cfg{consumer_strategy = competing}} = State0) ->
+ %% general case, single active consumer off
+ update_consumer0(ConsumerId, Meta, Spec, State0);
+update_consumer(ConsumerId, Meta, Spec,
+ #?STATE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active}} = State0)
+ when map_size(Cons0) == 0 ->
+ %% single active consumer on, no one is consuming yet
+ update_consumer0(ConsumerId, Meta, Spec, State0);
+update_consumer(ConsumerId, Meta, {Life, Credit, Mode},
+ #?STATE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = WaitingConsumers0} = State0) ->
+ %% single active consumer on and one active consumer already present;
+ %% add the new consumer to the waiting list
+ Consumer = #consumer{lifetime = Life, meta = Meta,
+ credit = Credit, credit_mode = Mode},
+ WaitingConsumers1 = WaitingConsumers0 ++ [{ConsumerId, Consumer}],
+ State0#?STATE{waiting_consumers = WaitingConsumers1}.
+
+update_consumer0(ConsumerId, Meta, {Life, Credit, Mode},
+ #?STATE{consumers = Cons0,
+ service_queue = ServiceQueue0} = State0) ->
+ %% TODO: this logic may not be correct for updating a pre-existing consumer
+ Init = #consumer{lifetime = Life, meta = Meta,
+ credit = Credit, credit_mode = Mode},
+ Cons = maps:update_with(ConsumerId,
+ fun(S) ->
+ %% remove any in-flight messages from
+ %% the credit update
+ N = maps:size(S#consumer.checked_out),
+ C = max(0, Credit - N),
+ S#consumer{lifetime = Life, credit = C}
+ end, Init, Cons0),
+ ServiceQueue = maybe_queue_consumer(ConsumerId, maps:get(ConsumerId, Cons),
+ ServiceQueue0),
+ State0#?STATE{consumers = Cons, service_queue = ServiceQueue}.
+
+maybe_queue_consumer(ConsumerId, #consumer{credit = Credit},
+ ServiceQueue0) ->
+ case Credit > 0 of
+ true ->
+ % consumer needs service - check if already on service queue
+ uniq_queue_in(ConsumerId, ServiceQueue0);
+ false ->
+ ServiceQueue0
+ end.
+
+convert_prefix_msgs(#?STATE{prefix_msgs = {R, P}} = State) ->
+ State#?STATE{prefix_msgs = {length(R), R, length(P), P}};
+convert_prefix_msgs(State) ->
+ State.
+
+%% creates a dehydrated version of the current state to be cached and
+%% potentially used for a snapshot at a later point
+dehydrate_state(#?STATE{messages = Messages,
+ consumers = Consumers,
+ returns = Returns,
+ low_msg_num = Low,
+ next_msg_num = Next,
+ prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0},
+ waiting_consumers = Waiting0} = State) ->
+ RCnt = lqueue:len(Returns),
+ %% TODO: optimise this function as far as possible
+ PrefRet1 = lists:foldr(fun ({'$prefix_msg', Header}, Acc) ->
+ [Header | Acc];
+ ({'$empty_msg', _} = Msg, Acc) ->
+ [Msg | Acc];
+ ({_, {_, {Header, 'empty'}}}, Acc) ->
+ [{'$empty_msg', Header} | Acc];
+ ({_, {_, {Header, _}}}, Acc) ->
+ [Header | Acc]
+ end,
+ [],
+ lqueue:to_list(Returns)),
+ PrefRet = PrefRet0 ++ PrefRet1,
+ PrefMsgsSuff = dehydrate_messages(Low, Next - 1, Messages, []),
+ %% prefix messages are not populated in normal operation, only after
+ %% recovering from a snapshot
+ PrefMsgs = PrefMsg0 ++ PrefMsgsSuff,
+ Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0],
+ State#?STATE{messages = #{},
+ ra_indexes = rabbit_fifo_index:empty(),
+ release_cursors = lqueue:new(),
+ low_msg_num = undefined,
+ consumers = maps:map(fun (_, C) ->
+ dehydrate_consumer(C)
+ end, Consumers),
+ returns = lqueue:new(),
+ prefix_msgs = {PRCnt + RCnt, PrefRet,
+ PPCnt + maps:size(Messages), PrefMsgs},
+ waiting_consumers = Waiting}.
+
+dehydrate_messages(Low, Next, _Msgs, Acc)
+ when Next < Low ->
+ Acc;
+dehydrate_messages(Low, Next, Msgs, Acc0) ->
+ Acc = case maps:get(Next, Msgs) of
+ {_RaftIdx, {_, 'empty'} = Msg} ->
+ [Msg | Acc0];
+ {_RaftIdx, {Header, _}} ->
+ [Header | Acc0]
+ end,
+ dehydrate_messages(Low, Next - 1, Msgs, Acc).
+
+dehydrate_consumer(#consumer{checked_out = Checked0} = Con) ->
+ Checked = maps:map(fun (_, {'$prefix_msg', _} = M) ->
+ M;
+ (_, {'$empty_msg', _} = M) ->
+ M;
+ (_, {_, {_, {Header, 'empty'}}}) ->
+ {'$empty_msg', Header};
+ (_, {_, {_, {Header, _}}}) ->
+ {'$prefix_msg', Header}
+ end, Checked0),
+ Con#consumer{checked_out = Checked}.
+
+%% make the state suitable for equality comparison
+normalize(#?STATE{release_cursors = Cursors} = State) ->
+ State#?STATE{release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}.
+
+is_over_limit(#?STATE{cfg = #cfg{max_length = undefined,
+ max_bytes = undefined}}) ->
+ false;
+is_over_limit(#?STATE{cfg = #cfg{max_length = MaxLength,
+ max_bytes = MaxBytes},
+ msg_bytes_enqueue = BytesEnq} = State) ->
+
+ messages_ready(State) > MaxLength orelse (BytesEnq > MaxBytes).
+
+normalize_for_v1(#?STATE{cfg = Cfg} = State) ->
+ %% run all v0 conversions so that v1 does not have to have this code
+ RCI = case Cfg of
+ #cfg{release_cursor_interval = {_, _} = R} ->
+ R;
+ #cfg{release_cursor_interval = undefined} ->
+ {?RELEASE_CURSOR_EVERY, ?RELEASE_CURSOR_EVERY};
+ #cfg{release_cursor_interval = C} ->
+ {?RELEASE_CURSOR_EVERY, C}
+ end,
+ convert_prefix_msgs(
+ State#?STATE{cfg = Cfg#cfg{release_cursor_interval = RCI}}).
+
+get_field(Field, State) ->
+ Fields = record_info(fields, ?STATE),
+ Index = record_index_of(Field, Fields),
+ element(Index, State).
+
+get_cfg_field(Field, #?STATE{cfg = Cfg} ) ->
+ Fields = record_info(fields, cfg),
+ Index = record_index_of(Field, Fields),
+ element(Index, Cfg).
+
+record_index_of(F, Fields) ->
+ index_of(2, F, Fields).
+
+index_of(_, F, []) ->
+ exit({field_not_found, F});
+index_of(N, F, [F | _]) ->
+ N;
+index_of(N, F, [_ | T]) ->
+ index_of(N+1, F, T).
+
+-spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> protocol().
+make_enqueue(Pid, Seq, Msg) ->
+ #enqueue{pid = Pid, seq = Seq, msg = Msg}.
+-spec make_checkout(consumer_id(),
+ checkout_spec(), consumer_meta()) -> protocol().
+make_checkout(ConsumerId, Spec, Meta) ->
+ #checkout{consumer_id = ConsumerId,
+ spec = Spec, meta = Meta}.
+
+-spec make_settle(consumer_id(), [msg_id()]) -> protocol().
+make_settle(ConsumerId, MsgIds) ->
+ #settle{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_return(consumer_id(), [msg_id()]) -> protocol().
+make_return(ConsumerId, MsgIds) ->
+ #return{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_discard(consumer_id(), [msg_id()]) -> protocol().
+make_discard(ConsumerId, MsgIds) ->
+ #discard{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_credit(consumer_id(), non_neg_integer(), non_neg_integer(),
+ boolean()) -> protocol().
+make_credit(ConsumerId, Credit, DeliveryCount, Drain) ->
+ #credit{consumer_id = ConsumerId,
+ credit = Credit,
+ delivery_count = DeliveryCount,
+ drain = Drain}.
+
+-spec make_purge() -> protocol().
+make_purge() -> #purge{}.
+
+-spec make_purge_nodes([node()]) -> protocol().
+make_purge_nodes(Nodes) ->
+ #purge_nodes{nodes = Nodes}.
+
+-spec make_update_config(config()) -> protocol().
+make_update_config(Config) ->
+ #update_config{config = Config}.
+
+add_bytes_enqueue(Bytes,
+ #?STATE{msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_enqueue = Enqueue + Bytes};
+add_bytes_enqueue(#{size := Bytes}, State) ->
+ add_bytes_enqueue(Bytes, State).
+
+add_bytes_drop(Bytes,
+ #?STATE{msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_enqueue = Enqueue - Bytes};
+add_bytes_drop(#{size := Bytes}, State) ->
+ add_bytes_drop(Bytes, State).
+
+add_bytes_checkout(Bytes,
+ #?STATE{msg_bytes_checkout = Checkout,
+ msg_bytes_enqueue = Enqueue } = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_checkout = Checkout + Bytes,
+ msg_bytes_enqueue = Enqueue - Bytes};
+add_bytes_checkout(#{size := Bytes}, State) ->
+ add_bytes_checkout(Bytes, State).
+
+add_bytes_settle(Bytes,
+ #?STATE{msg_bytes_checkout = Checkout} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_checkout = Checkout - Bytes};
+add_bytes_settle(#{size := Bytes}, State) ->
+ add_bytes_settle(Bytes, State).
+
+add_bytes_return(Bytes,
+ #?STATE{msg_bytes_checkout = Checkout,
+ msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_checkout = Checkout - Bytes,
+ msg_bytes_enqueue = Enqueue + Bytes};
+add_bytes_return(#{size := Bytes}, State) ->
+ add_bytes_return(Bytes, State).
+
+add_in_memory_counts(Bytes,
+ #?STATE{msg_bytes_in_memory = InMemoryBytes,
+ msgs_ready_in_memory = InMemoryCount} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_in_memory = InMemoryBytes + Bytes,
+ msgs_ready_in_memory = InMemoryCount + 1};
+add_in_memory_counts(#{size := Bytes}, State) ->
+ add_in_memory_counts(Bytes, State).
+
+subtract_in_memory_counts(Bytes,
+ #?STATE{msg_bytes_in_memory = InMemoryBytes,
+ msgs_ready_in_memory = InMemoryCount} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_in_memory = InMemoryBytes - Bytes,
+ msgs_ready_in_memory = InMemoryCount - 1};
+subtract_in_memory_counts(#{size := Bytes}, State) ->
+ subtract_in_memory_counts(Bytes, State).
+
+message_size(#basic_message{content = Content}) ->
+ #content{payload_fragments_rev = PFR} = Content,
+ iolist_size(PFR);
+message_size({'$prefix_msg', H}) ->
+ get_size_from_header(H);
+message_size({'$empty_msg', H}) ->
+ get_size_from_header(H);
+message_size(B) when is_binary(B) ->
+ byte_size(B);
+message_size(Msg) ->
+ %% probably only hit this for testing so ok to use erts_debug
+ erts_debug:size(Msg).
+
+get_size_from_header(Size) when is_integer(Size) ->
+ Size;
+get_size_from_header(#{size := B}) ->
+ B.
+
+
+all_nodes(#?STATE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Nodes0 = maps:fold(fun({_, P}, _, Acc) ->
+ Acc#{node(P) => ok}
+ end, #{}, Cons0),
+ Nodes1 = maps:fold(fun(P, _, Acc) ->
+ Acc#{node(P) => ok}
+ end, Nodes0, Enqs0),
+ maps:keys(
+ lists:foldl(fun({{_, P}, _}, Acc) ->
+ Acc#{node(P) => ok}
+ end, Nodes1, WaitingConsumers0)).
+
+all_pids_for(Node, #?STATE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Cons = maps:fold(fun({_, P}, _, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, [], Cons0),
+ Enqs = maps:fold(fun(P, _, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, Cons, Enqs0),
+ lists:foldl(fun({{_, P}, _}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, Acc) -> Acc
+ end, Enqs, WaitingConsumers0).
+
+suspected_pids_for(Node, #?STATE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Cons = maps:fold(fun({_, P}, #consumer{status = suspected_down}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, [], Cons0),
+ Enqs = maps:fold(fun(P, #enqueuer{status = suspected_down}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, Cons, Enqs0),
+ lists:foldl(fun({{_, P},
+ #consumer{status = suspected_down}}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, Acc) -> Acc
+ end, Enqs, WaitingConsumers0).
diff --git a/deps/rabbit/src/rabbit_fifo_v0.hrl b/deps/rabbit/src/rabbit_fifo_v0.hrl
new file mode 100644
index 0000000000..333ccb4d77
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo_v0.hrl
@@ -0,0 +1,195 @@
+
+-type option(T) :: undefined | T.
+
+-type raw_msg() :: term().
+%% The raw message. It is opaque to rabbit_fifo.
+
+-type msg_in_id() :: non_neg_integer().
+% a queue scoped monotonically incrementing integer used to enforce order
+% in the unassigned messages map
+
+-type msg_id() :: non_neg_integer().
+%% A consumer-scoped monotonically incrementing integer included with a
+%% {@link delivery/0.}. Used to settle deliveries using
+%% {@link rabbit_fifo_client:settle/3.}
+
+-type msg_seqno() :: non_neg_integer().
+%% A sender process scoped monotonically incrementing integer included
+%% in enqueue messages. Used to ensure ordering of messages sent from the
+%% same process
+
+-type msg_header() :: msg_size() |
+ #{size := msg_size(),
+ delivery_count => non_neg_integer()}.
+%% The message header:
+%% delivery_count: the number of unsuccessful delivery attempts.
+%% A non-zero value indicates a previous attempt.
+%% If the header only contains the size it can be condensed to an integer.
+
+-type msg() :: {msg_header(), raw_msg()}.
+%% message with a header map.
+
+-type msg_size() :: non_neg_integer().
+%% the size in bytes of the msg payload
+
+-type indexed_msg() :: {ra:index(), msg()}.
+
+-type prefix_msg() :: {'$prefix_msg', msg_header()}.
+
+-type delivery_msg() :: {msg_id(), msg()}.
+%% A tuple consisting of the message id and the headered message.
+
+-type consumer_tag() :: binary().
+%% An arbitrary binary tag used to distinguish between different consumers
+%% set up by the same process. See: {@link rabbit_fifo_client:checkout/3.}
+
+-type delivery() :: {delivery, consumer_tag(), [delivery_msg()]}.
+%% Represents the delivery of one or more rabbit_fifo messages.
+
+-type consumer_id() :: {consumer_tag(), pid()}.
+%% The entity that receives messages. Uniquely identifies a consumer.
+
+-type credit_mode() :: simple_prefetch | credited.
+%% determines how credit is replenished
+
+-type checkout_spec() :: {once | auto, Num :: non_neg_integer(),
+ credit_mode()} |
+ {dequeue, settled | unsettled} |
+ cancel.
+
+-type consumer_meta() :: #{ack => boolean(),
+ username => binary(),
+ prefetch => non_neg_integer(),
+ args => list()}.
+%% static metadata associated with a consumer
+
+
+-type applied_mfa() :: {module(), atom(), list()}.
+% represents a partially applied module call
+
+-define(RELEASE_CURSOR_EVERY, 64000).
+-define(RELEASE_CURSOR_EVERY_MAX, 3200000).
+-define(USE_AVG_HALF_LIFE, 10000.0).
+%% an average QQ without any message uses about 100KB so setting this limit
+%% to ~10 times that should be relatively safe.
+-define(GC_MEM_LIMIT_B, 2000000).
+
+-define(MB, 1048576).
+-define(STATE, rabbit_fifo).
+
+-record(consumer,
+ {meta = #{} :: consumer_meta(),
+ checked_out = #{} :: #{msg_id() => {msg_in_id(), indexed_msg()}},
+ next_msg_id = 0 :: msg_id(), % part of snapshot data
+ %% max number of messages that can be sent
+ %% decremented for each delivery
+         credit = 0 :: non_neg_integer(),
+ %% total number of checked out messages - ever
+ %% incremented for each delivery
+ delivery_count = 0 :: non_neg_integer(),
+ %% the mode of how credit is incremented
+ %% simple_prefetch: credit is re-filled as deliveries are settled
+ %% or returned.
+ %% credited: credit can only be changed by receiving a consumer_credit
+ %% command: `{consumer_credit, ReceiverDeliveryCount, Credit}'
+ credit_mode = simple_prefetch :: credit_mode(), % part of snapshot data
+ lifetime = once :: once | auto,
+ status = up :: up | suspected_down | cancelled
+ }).
+
+-type consumer() :: #consumer{}.
+
+-type consumer_strategy() :: competing | single_active.
+
+-record(enqueuer,
+ {next_seqno = 1 :: msg_seqno(),
+ % out of order enqueues - sorted list
+ pending = [] :: [{msg_seqno(), ra:index(), raw_msg()}],
+ status = up :: up | suspected_down
+ }).
+
+-record(cfg,
+ {name :: atom(),
+ resource :: rabbit_types:r('queue'),
+ release_cursor_interval ::
+ undefined | non_neg_integer() |
+ {non_neg_integer(), non_neg_integer()},
+ dead_letter_handler :: option(applied_mfa()),
+ become_leader_handler :: option(applied_mfa()),
+ max_length :: option(non_neg_integer()),
+ max_bytes :: option(non_neg_integer()),
+ %% whether single active consumer is on or not for this queue
+ consumer_strategy = competing :: consumer_strategy(),
+ %% the maximum number of unsuccessful delivery attempts permitted
+ delivery_limit :: option(non_neg_integer()),
+ max_in_memory_length :: option(non_neg_integer()),
+ max_in_memory_bytes :: option(non_neg_integer())
+ }).
+
+-type prefix_msgs() :: {list(), list()} |
+ {non_neg_integer(), list(),
+ non_neg_integer(), list()}.
+
+-record(?STATE,
+ {cfg :: #cfg{},
+ % unassigned messages
+ messages = #{} :: #{msg_in_id() => indexed_msg()},
+         % defines the lowest msg_in_id available in the messages map
+         % that isn't a return
+ low_msg_num :: option(msg_in_id()),
+         % defines the next msg_in_id to be added to the messages map
+ next_msg_num = 1 :: msg_in_id(),
+ % list of returned msg_in_ids - when checking out it picks from
+ % this list first before taking low_msg_num
+ returns = lqueue:new() :: lqueue:lqueue(prefix_msg() |
+ {msg_in_id(), indexed_msg()}),
+ % a counter of enqueues - used to trigger shadow copy points
+ enqueue_count = 0 :: non_neg_integer(),
+ % a map containing all the live processes that have ever enqueued
+ % a message to this queue as well as a cached value of the smallest
+ % ra_index of all pending enqueues
+ enqueuers = #{} :: #{pid() => #enqueuer{}},
+ % master index of all enqueue raft indexes including pending
+ % enqueues
+ % rabbit_fifo_index can be slow when calculating the smallest
+ % index when there are large gaps but should be faster than gb_trees
+ % for normal appending operations as it's backed by a map
+ ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(),
+ release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor,
+ ra:index(), #?STATE{}}),
+ % consumers need to reflect consumer state at time of snapshot
+ % needs to be part of snapshot
+ consumers = #{} :: #{consumer_id() => #consumer{}},
+ % consumers that require further service are queued here
+ % needs to be part of snapshot
+ service_queue = queue:new() :: queue:queue(consumer_id()),
+ %% This is a special field that is only used for snapshots
+ %% It represents the queued messages at the time the
+ %% dehydrated snapshot state was cached.
+ %% As release_cursors are only emitted for raft indexes where all
+ %% prior messages no longer contribute to the current state we can
+ %% replace all message payloads with their sizes (to be used for
+ %% overflow calculations).
+ %% This is done so that consumers are still served in a deterministic
+ %% order on recovery.
+ prefix_msgs = {0, [], 0, []} :: prefix_msgs(),
+ msg_bytes_enqueue = 0 :: non_neg_integer(),
+ msg_bytes_checkout = 0 :: non_neg_integer(),
+         %% waiting consumers; one is picked when the active consumer is cancelled
+         %% or dies. Used only when single active consumer is on.
+ waiting_consumers = [] :: [{consumer_id(), consumer()}],
+ msg_bytes_in_memory = 0 :: non_neg_integer(),
+ msgs_ready_in_memory = 0 :: non_neg_integer()
+ }).
+
+-type config() :: #{name := atom(),
+ queue_resource := rabbit_types:r('queue'),
+ dead_letter_handler => applied_mfa(),
+ become_leader_handler => applied_mfa(),
+ release_cursor_interval => non_neg_integer(),
+ max_length => non_neg_integer(),
+ max_bytes => non_neg_integer(),
+ max_in_memory_length => non_neg_integer(),
+ max_in_memory_bytes => non_neg_integer(),
+ single_active_consumer_on => boolean(),
+ delivery_limit => non_neg_integer()}.
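+%% For illustration (hypothetical queue name and limits), a minimal config
+%% might look like:
+%%   #{name => 'my_qq',
+%%     queue_resource => rabbit_misc:r(<<"/">>, queue, <<"my_qq">>),
+%%     max_length => 10000,
+%%     single_active_consumer_on => false}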
diff --git a/deps/rabbit/src/rabbit_file.erl b/deps/rabbit/src/rabbit_file.erl
new file mode 100644
index 0000000000..f8263d9e77
--- /dev/null
+++ b/deps/rabbit/src/rabbit_file.erl
@@ -0,0 +1,321 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_file).
+
+-include_lib("kernel/include/file.hrl").
+
+-export([is_file/1, is_dir/1, file_size/1, ensure_dir/1, wildcard/2, list_dir/1]).
+-export([read_term_file/1, write_term_file/2, write_file/2, write_file/3]).
+-export([append_file/2, ensure_parent_dirs_exist/1]).
+-export([rename/2, delete/1, recursive_delete/1, recursive_copy/2]).
+-export([lock_file/1]).
+-export([read_file_info/1]).
+-export([filename_as_a_directory/1]).
+
+-import(file_handle_cache, [with_handle/1, with_handle/2]).
+
+-define(TMP_EXT, ".tmp").
+
+%%----------------------------------------------------------------------------
+
+-type ok_or_error() :: rabbit_types:ok_or_error(any()).
+
+%%----------------------------------------------------------------------------
+
+-spec is_file((file:filename())) -> boolean().
+
+is_file(File) ->
+ case read_file_info(File) of
+ {ok, #file_info{type=regular}} -> true;
+ {ok, #file_info{type=directory}} -> true;
+ _ -> false
+ end.
+
+-spec is_dir((file:filename())) -> boolean().
+
+is_dir(Dir) -> is_dir_internal(read_file_info(Dir)).
+
+is_dir_no_handle(Dir) -> is_dir_internal(prim_file:read_file_info(Dir)).
+
+is_dir_internal({ok, #file_info{type=directory}}) -> true;
+is_dir_internal(_) -> false.
+
+-spec file_size((file:filename())) -> non_neg_integer().
+
+file_size(File) ->
+ case read_file_info(File) of
+ {ok, #file_info{size=Size}} -> Size;
+ _ -> 0
+ end.
+
+-spec ensure_dir((file:filename())) -> ok_or_error().
+
+ensure_dir(File) -> with_handle(fun () -> ensure_dir_internal(File) end).
+
+ensure_dir_internal("/") ->
+ ok;
+ensure_dir_internal(File) ->
+ Dir = filename:dirname(File),
+ case is_dir_no_handle(Dir) of
+ true -> ok;
+ false -> ensure_dir_internal(Dir),
+ prim_file:make_dir(Dir)
+ end.
+
+-spec wildcard(string(), file:filename()) -> [file:filename()].
+
+wildcard(Pattern, Dir) ->
+ case list_dir(Dir) of
+ {ok, Files} -> {ok, RE} = re:compile(Pattern, [anchored]),
+ [File || File <- Files,
+ match =:= re:run(File, RE, [{capture, none}])];
+ {error, _} -> []
+ end.
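+%% Note that despite the name, Pattern is an anchored regular expression,
+%% not a shell glob; e.g. (hypothetical directory contents)
+%% wildcard("queue_[0-9]+", Dir) matches "queue_1" but not "1_queue".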
+
+-spec list_dir(file:filename()) ->
+ rabbit_types:ok_or_error2([file:filename()], any()).
+
+list_dir(Dir) -> with_handle(fun () -> prim_file:list_dir(Dir) end).
+
+read_file_info(File) ->
+ with_handle(fun () -> prim_file:read_file_info(File) end).
+
+-spec read_term_file
+ (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any()).
+
+read_term_file(File) ->
+ try
+ {ok, Data} = with_handle(fun () -> prim_file:read_file(File) end),
+ {ok, Tokens, _} = erl_scan:string(binary_to_list(Data)),
+ TokenGroups = group_tokens(Tokens),
+ {ok, [begin
+ {ok, Term} = erl_parse:parse_term(Tokens1),
+ Term
+ end || Tokens1 <- TokenGroups]}
+ catch
+ error:{badmatch, Error} -> Error
+ end.
+
+group_tokens(Ts) -> [lists:reverse(G) || G <- group_tokens([], Ts)].
+
+group_tokens([], []) -> [];
+group_tokens(Cur, []) -> [Cur];
+group_tokens(Cur, [T = {dot, _} | Ts]) -> [[T | Cur] | group_tokens([], Ts)];
+group_tokens(Cur, [T | Ts]) -> group_tokens([T | Cur], Ts).
+
+-spec write_term_file(file:filename(), [any()]) -> ok_or_error().
+
+write_term_file(File, Terms) ->
+ write_file(File, list_to_binary([io_lib:format("~w.~n", [Term]) ||
+ Term <- Terms])).
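+%% Round-trip sketch (hypothetical path and terms): write_term_file("/tmp/x.term",
+%% [{a, 1}, {b, 2}]) writes one dot-terminated term per line, so a subsequent
+%% read_term_file("/tmp/x.term") should return {ok, [{a, 1}, {b, 2}]}.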
+
+-spec write_file(file:filename(), iodata()) -> ok_or_error().
+
+write_file(Path, Data) -> write_file(Path, Data, []).
+
+-spec write_file(file:filename(), iodata(), [any()]) -> ok_or_error().
+
+write_file(Path, Data, Modes) ->
+ Modes1 = [binary, write | (Modes -- [binary, write])],
+ case make_binary(Data) of
+ Bin when is_binary(Bin) -> write_file1(Path, Bin, Modes1);
+ {error, _} = E -> E
+ end.
+
+%% make_binary/1 is based on the corresponding function in the
+%% kernel/file.erl module of the Erlang R14B02 release, which is
+%% licensed under the EPL.
+
+make_binary(Bin) when is_binary(Bin) ->
+ Bin;
+make_binary(List) ->
+ try
+ iolist_to_binary(List)
+ catch error:Reason ->
+ {error, Reason}
+ end.
+
+write_file1(Path, Bin, Modes) ->
+ try
+ with_synced_copy(Path, Modes,
+ fun (Hdl) ->
+ ok = prim_file:write(Hdl, Bin)
+ end)
+ catch
+ error:{badmatch, Error} -> Error;
+ _:{error, Error} -> {error, Error}
+ end.
+
+with_synced_copy(Path, Modes, Fun) ->
+ case lists:member(append, Modes) of
+ true ->
+ {error, append_not_supported, Path};
+ false ->
+ with_handle(
+ fun () ->
+ Bak = Path ++ ?TMP_EXT,
+ case prim_file:open(Bak, Modes) of
+ {ok, Hdl} ->
+ try
+ Result = Fun(Hdl),
+ ok = prim_file:sync(Hdl),
+ ok = prim_file:rename(Bak, Path),
+ Result
+ after
+ prim_file:close(Hdl)
+ end;
+ {error, _} = E -> E
+ end
+ end)
+ end.
+
+%% TODO the semantics of this function are rather odd. But see bug 25021.
+
+-spec append_file(file:filename(), string()) -> ok_or_error().
+
+append_file(File, Suffix) ->
+ case read_file_info(File) of
+ {ok, FInfo} -> append_file(File, FInfo#file_info.size, Suffix);
+ {error, enoent} -> append_file(File, 0, Suffix);
+ Error -> Error
+ end.
+
+append_file(_, _, "") ->
+ ok;
+append_file(File, 0, Suffix) ->
+ with_handle(fun () ->
+ case prim_file:open([File, Suffix], [append]) of
+ {ok, Fd} -> prim_file:close(Fd);
+ Error -> Error
+ end
+ end);
+append_file(File, _, Suffix) ->
+ case with_handle(2, fun () ->
+ file:copy(File, {[File, Suffix], [append]})
+ end) of
+ {ok, _BytesCopied} -> ok;
+ Error -> Error
+ end.
+
+-spec ensure_parent_dirs_exist(string()) -> 'ok'.
+
+ensure_parent_dirs_exist(Filename) ->
+ case ensure_dir(Filename) of
+ ok -> ok;
+ {error, Reason} ->
+ throw({error, {cannot_create_parent_dirs, Filename, Reason}})
+ end.
+
+-spec rename(file:filename(), file:filename()) -> ok_or_error().
+
+rename(Old, New) -> with_handle(fun () -> prim_file:rename(Old, New) end).
+
+-spec delete(file:filename()) -> ok_or_error().
+
+delete(File) -> with_handle(fun () -> prim_file:delete(File) end).
+
+-spec recursive_delete([file:filename()]) ->
+ rabbit_types:ok_or_error({file:filename(), any()}).
+
+recursive_delete(Files) ->
+ with_handle(
+ fun () -> lists:foldl(fun (Path, ok) -> recursive_delete1(Path);
+ (_Path, {error, _Err} = Error) -> Error
+ end, ok, Files)
+ end).
+
+recursive_delete1(Path) ->
+ case is_dir_no_handle(Path) and not(is_symlink_no_handle(Path)) of
+ false -> case prim_file:delete(Path) of
+ ok -> ok;
+ {error, enoent} -> ok; %% Path doesn't exist anyway
+ {error, Err} -> {error, {Path, Err}}
+ end;
+ true -> case prim_file:list_dir(Path) of
+ {ok, FileNames} ->
+ case lists:foldl(
+ fun (FileName, ok) ->
+ recursive_delete1(
+ filename:join(Path, FileName));
+ (_FileName, Error) ->
+ Error
+ end, ok, FileNames) of
+ ok ->
+ case prim_file:del_dir(Path) of
+ ok -> ok;
+ {error, Err} -> {error, {Path, Err}}
+ end;
+ {error, _Err} = Error ->
+ Error
+ end;
+ {error, Err} ->
+ {error, {Path, Err}}
+ end
+ end.
+
+is_symlink_no_handle(File) ->
+ case prim_file:read_link(File) of
+ {ok, _} -> true;
+ _ -> false
+ end.
+
+-spec recursive_copy(file:filename(), file:filename()) ->
+ rabbit_types:ok_or_error({file:filename(), file:filename(), any()}).
+
+recursive_copy(Src, Dest) ->
+ %% Note that this uses the 'file' module and, hence, shouldn't be
+ %% run on many processes at once.
+ case is_dir(Src) of
+ false -> case file:copy(Src, Dest) of
+ {ok, _Bytes} -> ok;
+ {error, enoent} -> ok; %% Path doesn't exist anyway
+ {error, Err} -> {error, {Src, Dest, Err}}
+ end;
+ true -> case file:list_dir(Src) of
+ {ok, FileNames} ->
+ case file:make_dir(Dest) of
+ ok ->
+ lists:foldl(
+ fun (FileName, ok) ->
+ recursive_copy(
+ filename:join(Src, FileName),
+ filename:join(Dest, FileName));
+ (_FileName, Error) ->
+ Error
+ end, ok, FileNames);
+ {error, Err} ->
+ {error, {Src, Dest, Err}}
+ end;
+ {error, Err} ->
+ {error, {Src, Dest, Err}}
+ end
+ end.
+
+%% TODO: When we stop supporting Erlang prior to R14, this should be
+%% replaced with file:open [write, exclusive]
+
+-spec lock_file(file:filename()) -> rabbit_types:ok_or_error('eexist').
+
+lock_file(Path) ->
+ case is_file(Path) of
+ true -> {error, eexist};
+ false -> with_handle(
+ fun () -> {ok, Lock} = prim_file:open(Path, [write]),
+ ok = prim_file:close(Lock)
+ end)
+ end.
+
+-spec filename_as_a_directory(file:filename()) -> file:filename().
+
+filename_as_a_directory(FileName) ->
+ case lists:last(FileName) of
+ "/" ->
+ FileName;
+ _ ->
+ FileName ++ "/"
+ end.
diff --git a/deps/rabbit/src/rabbit_framing.erl b/deps/rabbit/src/rabbit_framing.erl
new file mode 100644
index 0000000000..42927b2b68
--- /dev/null
+++ b/deps/rabbit/src/rabbit_framing.erl
@@ -0,0 +1,36 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% TODO auto-generate
+
+-module(rabbit_framing).
+
+-export_type([protocol/0,
+ amqp_field_type/0, amqp_property_type/0,
+ amqp_table/0, amqp_array/0, amqp_value/0,
+ amqp_method_name/0, amqp_method/0, amqp_method_record/0,
+ amqp_method_field_name/0, amqp_property_record/0,
+ amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
+
+-type protocol() :: 'rabbit_framing_amqp_0_8' | 'rabbit_framing_amqp_0_9_1'.
+
+-define(protocol_type(T), type(T :: rabbit_framing_amqp_0_8:T |
+ rabbit_framing_amqp_0_9_1:T)).
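+%% For example, `-?protocol_type(amqp_table()).' below expands to
+%%   -type(amqp_table() :: rabbit_framing_amqp_0_8:amqp_table() |
+%%                         rabbit_framing_amqp_0_9_1:amqp_table()).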
+
+-?protocol_type(amqp_field_type()).
+-?protocol_type(amqp_property_type()).
+-?protocol_type(amqp_table()).
+-?protocol_type(amqp_array()).
+-?protocol_type(amqp_value()).
+-?protocol_type(amqp_method_name()).
+-?protocol_type(amqp_method()).
+-?protocol_type(amqp_method_record()).
+-?protocol_type(amqp_method_field_name()).
+-?protocol_type(amqp_property_record()).
+-?protocol_type(amqp_exception()).
+-?protocol_type(amqp_exception_code()).
+-?protocol_type(amqp_class_id()).
diff --git a/deps/rabbit/src/rabbit_guid.erl b/deps/rabbit/src/rabbit_guid.erl
new file mode 100644
index 0000000000..01e6464332
--- /dev/null
+++ b/deps/rabbit/src/rabbit_guid.erl
@@ -0,0 +1,181 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_guid).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([filename/0]).
+-export([gen/0, gen_secure/0, string/2, binary/2, to_string/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(SERVER, ?MODULE).
+-define(SERIAL_FILENAME, "rabbit_serial").
+
+-record(state, {serial}).
+
+%%----------------------------------------------------------------------------
+
+-export_type([guid/0]).
+
+-type guid() :: binary().
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE,
+ [update_disk_serial()], []).
+
+%% We use this to detect a (possibly rather old) Mnesia directory,
+%% since it has existed since at least 1.7.0 (as far back as I cared
+%% to go).
+
+-spec filename() -> string().
+
+filename() ->
+ filename:join(rabbit_mnesia:dir(), ?SERIAL_FILENAME).
+
+update_disk_serial() ->
+ Filename = filename(),
+ Serial = case rabbit_file:read_term_file(Filename) of
+ {ok, [Num]} -> Num;
+ {ok, []} -> 0; %% [1]
+ {error, enoent} -> 0;
+ {error, Reason} ->
+ throw({error, {cannot_read_serial_file, Filename, Reason}})
+ end,
+ case rabbit_file:write_term_file(Filename, [Serial + 1]) of
+ ok -> ok;
+ {error, Reason1} ->
+ throw({error, {cannot_write_serial_file, Filename, Reason1}})
+ end,
+ Serial.
+%% [1] a couple of users have reported startup failures due to the
+%% file being empty, presumably as a result of filesystem
+%% corruption. While rabbit doesn't cope with that in general, in this
+%% specific case we can be more accommodating.
+
+%% Generate an un-hashed guid.
+fresh() ->
+ %% We don't use erlang:now() here because a) it may return
+ %% duplicates when the system clock has been rewound prior to a
+ %% restart, or ids were generated at a high rate (which causes
+ %% now() to move ahead of the system time), and b) it is really
+ %% slow since it takes a global lock and makes a system call.
+ %%
+ %% A persisted serial number, the node, and a unique reference
+ %% (per node incarnation) uniquely identifies a process in space
+ %% and time.
+ Serial = gen_server:call(?SERVER, serial, infinity),
+ {Serial, node(), make_ref()}.
+
+advance_blocks({B1, B2, B3, B4}, I) ->
+ %% To produce a new set of blocks, we create a new 32bit block
+ %% hashing {B5, I}. The new hash is used as last block, and the
+ %% other three blocks are XORed with it.
+ %%
+ %% Doing this is convenient because it avoids cascading conflicts,
+ %% while being very fast. The conflicts are avoided by propagating
+ %% the changes through all the blocks at each round by XORing, so
+ %% the only occasion in which a collision will take place is when
+ %% all 4 blocks are the same and the counter is the same.
+ %%
+ %% The range (2^32) is provided explicitly since phash uses 2^27
+ %% by default.
+ B5 = erlang:phash2({B1, I}, 4294967296),
+ {{(B2 bxor B5), (B3 bxor B5), (B4 bxor B5), B5}, I+1}.
+
+%% generate a GUID. This function should be used when performance is a
+%% priority and predictability is not an issue. Otherwise use
+%% gen_secure/0.
+
+-spec gen() -> guid().
+
+gen() ->
+ %% We hash a fresh GUID with md5, split it in 4 blocks, and each
+ %% time we need a new guid we rotate them producing a new hash
+ %% with the aid of the counter. Look at the comments in
+ %% advance_blocks/2 for details.
+ case get(guid) of
+ undefined -> <<B1:32, B2:32, B3:32, B4:32>> = Res =
+ erlang:md5(term_to_binary(fresh())),
+ put(guid, {{B1, B2, B3, B4}, 0}),
+ Res;
+ {BS, I} -> {{B1, B2, B3, B4}, _} = S = advance_blocks(BS, I),
+ put(guid, S),
+ <<B1:32, B2:32, B3:32, B4:32>>
+ end.
+
+%% generate a non-predictable GUID.
+%%
+%% The id is only unique within a single cluster and as long as the
+%% serial store hasn't been deleted.
+%%
+%% If you are not concerned with predictability, gen/0 is faster.
+
+-spec gen_secure() -> guid().
+
+gen_secure() ->
+ %% Here instead of hashing once we hash the GUID and the counter
+ %% each time, so that the GUID is not predictable.
+ G = case get(guid_secure) of
+ undefined -> {fresh(), 0};
+ {S, I} -> {S, I+1}
+ end,
+ put(guid_secure, G),
+ erlang:md5(term_to_binary(G)).
+
+%% generate a readable string representation of a GUID.
+%%
+%% employs base64url encoding, which is safer in more contexts than
+%% plain base64.
+
+-spec string(guid() | string(), any()) -> string().
+
+string(G, Prefix) when is_list(Prefix) ->
+ Prefix ++ "-" ++ rabbit_misc:base64url(G);
+string(G, Prefix) when is_binary(Prefix) ->
+ binary_to_list(Prefix) ++ "-" ++ rabbit_misc:base64url(G).
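+%% For illustration (the suffix is derived from the guid; the value shown is
+%% hypothetical): string(gen(), "amq.ctag") returns "amq.ctag-" followed by a
+%% base64url-encoded form of the guid, e.g. "amq.ctag-UzKDkZ9hWJtqLnVYaWl6dw".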
+
+-spec binary(guid() | string(), any()) -> binary().
+
+binary(G, Prefix) ->
+ list_to_binary(string(G, Prefix)).
+
+%% copied from https://stackoverflow.com/questions/1657204/erlang-uuid-generator
+to_string(<<TL:32, TM:16, THV:16, CSR:8, CSL:8, N:48>>) ->
+ lists:flatten(
+ io_lib:format("~8.16.0b-~4.16.0b-~4.16.0b-~2.16.0b~2.16.0b-~12.16.0b",
+ [TL, TM, THV, CSR, CSL, N])).
+
+%%----------------------------------------------------------------------------
+
+init([Serial]) ->
+ {ok, #state{serial = Serial}}.
+
+handle_call(serial, _From, State = #state{serial = Serial}) ->
+ {reply, Serial, State};
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/rabbit_health_check.erl b/deps/rabbit/src/rabbit_health_check.erl
new file mode 100644
index 0000000000..4674ca7d8e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_health_check.erl
@@ -0,0 +1,80 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_health_check).
+
+%% External API
+-export([node/1, node/2]).
+
+%% Internal API
+-export([local/0]).
+
+%%----------------------------------------------------------------------------
+%% External functions
+%%----------------------------------------------------------------------------
+
+-spec node(node(), timeout()) -> ok | {badrpc, term()} | {error_string, string()}.
+
+node(Node) ->
+ %% same default as in CLI
+ node(Node, 70000).
+node(Node, Timeout) ->
+ rabbit_misc:rpc_call(Node, rabbit_health_check, local, [], Timeout).
+
+-spec local() -> ok | {error_string, string()}.
+
+local() ->
+ rabbit_log:warning("rabbitmqctl node_health_check and its HTTP API counterpart are DEPRECATED. "
+ "See https://www.rabbitmq.com/monitoring.html#health-checks for replacement options."),
+ run_checks([list_channels, list_queues, alarms, rabbit_node_monitor]).
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+run_checks([]) ->
+ ok;
+run_checks([C|Cs]) ->
+ case node_health_check(C) of
+ ok ->
+ run_checks(Cs);
+ Error ->
+ Error
+ end.
+
+node_health_check(list_channels) ->
+ case rabbit_channel:info_local([pid]) of
+ L when is_list(L) ->
+ ok
+ end;
+
+node_health_check(list_queues) ->
+ health_check_queues(rabbit_vhost:list_names());
+
+node_health_check(rabbit_node_monitor) ->
+ case rabbit_node_monitor:partitions() of
+ [] ->
+ ok;
+ L when is_list(L), length(L) > 0 ->
+ ErrorMsg = io_lib:format("cluster partition in effect: ~p", [L]),
+ {error_string, ErrorMsg}
+ end;
+
+node_health_check(alarms) ->
+ case proplists:get_value(alarms, rabbit:status()) of
+ [] ->
+ ok;
+ Alarms ->
+ ErrorMsg = io_lib:format("resource alarm(s) in effect:~p", [Alarms]),
+ {error_string, ErrorMsg}
+ end.
+
+health_check_queues([]) ->
+ ok;
+health_check_queues([VHost|RestVHosts]) ->
+ case rabbit_amqqueue:info_local(VHost) of
+ L when is_list(L) ->
+ health_check_queues(RestVHosts)
+ end.
diff --git a/deps/rabbit/src/rabbit_lager.erl b/deps/rabbit/src/rabbit_lager.erl
new file mode 100644
index 0000000000..3cbc5e431d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_lager.erl
@@ -0,0 +1,723 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_lager).
+
+-include_lib("rabbit_common/include/rabbit_log.hrl").
+
+%% API
+-export([start_logger/0, log_locations/0, fold_sinks/2,
+ broker_is_started/0, set_log_level/1]).
+
+%% For test purposes
+-export([configure_lager/0]).
+
+-export_type([log_location/0]).
+
+-type log_location() :: string().
+
+start_logger() ->
+ ok = maybe_remove_logger_handler(),
+ ok = app_utils:stop_applications([lager, syslog]),
+ ok = ensure_lager_configured(),
+ ok = app_utils:start_applications([lager]),
+ fold_sinks(
+ fun
+ (_, [], Acc) ->
+ Acc;
+ (SinkName, _, Acc) ->
+ lager:log(SinkName, info, self(),
+ "Log file opened with Lager", []),
+ Acc
+ end, ok),
+ ensure_log_working().
+
+broker_is_started() ->
+ {ok, HwmCurrent} = application:get_env(lager, error_logger_hwm),
+ {ok, HwmOrig0} = application:get_env(lager, error_logger_hwm_original),
+ HwmOrig = case get_most_verbose_log_level() of
+ debug -> HwmOrig0 * 100;
+ _ -> HwmOrig0
+ end,
+ case HwmOrig =:= HwmCurrent of
+ false ->
+ ok = application:set_env(lager, error_logger_hwm, HwmOrig),
+ Handlers = gen_event:which_handlers(lager_event),
+ lists:foreach(fun(Handler) ->
+ lager:set_loghwm(Handler, HwmOrig)
+ end, Handlers),
+ ok;
+ _ ->
+ ok
+ end.
+
+set_log_level(Level) ->
+ IsValidLevel = lists:member(Level, lager_util:levels()),
+ set_log_level(IsValidLevel, Level).
+
+set_log_level(true, Level) ->
+ SinksAndHandlers = [{Sink, gen_event:which_handlers(Sink)} ||
+ Sink <- lager:list_all_sinks()],
+ DefaultHwm = application:get_env(lager, error_logger_hwm_original, 50),
+ Hwm = case Level of
+ debug -> DefaultHwm * 100;
+ _ -> DefaultHwm
+ end,
+ application:set_env(lager, error_logger_hwm, Hwm),
+ set_sink_log_level(SinksAndHandlers, Level, Hwm);
+set_log_level(_, Level) ->
+ {error, {invalid_log_level, Level}}.
+
+set_sink_log_level([], _Level, _Hwm) ->
+ ok;
+set_sink_log_level([{Sink, Handlers}|Rest], Level, Hwm) ->
+ set_sink_handler_log_level(Sink, Handlers, Level, Hwm),
+ set_sink_log_level(Rest, Level, Hwm).
+
+set_sink_handler_log_level(_Sink, [], _Level, _Hwm) ->
+ ok;
+set_sink_handler_log_level(Sink, [Handler|Rest], Level, Hwm)
+ when is_atom(Handler) andalso is_integer(Hwm) ->
+ lager:set_loghwm(Sink, Handler, undefined, Hwm),
+ ok = lager:set_loglevel(Sink, Handler, undefined, Level),
+ set_sink_handler_log_level(Sink, Rest, Level, Hwm);
+set_sink_handler_log_level(Sink, [{Handler, Id}|Rest], Level, Hwm) ->
+ lager:set_loghwm(Sink, Handler, Id, Hwm),
+ ok = lager:set_loglevel(Sink, Handler, Id, Level),
+ set_sink_handler_log_level(Sink, Rest, Level, Hwm);
+set_sink_handler_log_level(Sink, [_|Rest], Level, Hwm) ->
+ set_sink_handler_log_level(Sink, Rest, Level, Hwm).
+
+log_locations() ->
+ ensure_lager_configured(),
+ DefaultHandlers = application:get_env(lager, handlers, []),
+ Sinks = application:get_env(lager, extra_sinks, []),
+ ExtraHandlers = [proplists:get_value(handlers, Props, [])
+ || {_, Props} <- Sinks],
+ lists:sort(log_locations1([DefaultHandlers | ExtraHandlers], [])).
+
+log_locations1([Handlers | Rest], Locations) ->
+ Locations1 = log_locations2(Handlers, Locations),
+ log_locations1(Rest, Locations1);
+log_locations1([], Locations) ->
+ Locations.
+
+log_locations2([{lager_file_backend, Settings} | Rest], Locations) ->
+ FileName = lager_file_name1(Settings),
+ Locations1 = case lists:member(FileName, Locations) of
+ false -> [FileName | Locations];
+ true -> Locations
+ end,
+ log_locations2(Rest, Locations1);
+log_locations2([{lager_console_backend, _} | Rest], Locations) ->
+ Locations1 = case lists:member("<stdout>", Locations) of
+ false -> ["<stdout>" | Locations];
+ true -> Locations
+ end,
+ log_locations2(Rest, Locations1);
+log_locations2([_ | Rest], Locations) ->
+ log_locations2(Rest, Locations);
+log_locations2([], Locations) ->
+ Locations.
+
+fold_sinks(Fun, Acc) ->
+ Handlers = lager_config:global_get(handlers),
+ Sinks = dict:to_list(lists:foldl(
+ fun
+ ({{lager_file_backend, F}, _, S}, Dict) ->
+ dict:append(S, F, Dict);
+ ({_, _, S}, Dict) ->
+ case dict:is_key(S, Dict) of
+ true -> dict:store(S, [], Dict);
+ false -> Dict
+ end
+ end,
+ dict:new(), Handlers)),
+ fold_sinks(Sinks, Fun, Acc).
+
+fold_sinks([{SinkName, FileNames} | Rest], Fun, Acc) ->
+ Acc1 = Fun(SinkName, FileNames, Acc),
+ fold_sinks(Rest, Fun, Acc1);
+fold_sinks([], _, Acc) ->
+ Acc.
+
+ensure_log_working() ->
+ {ok, Handlers} = application:get_env(lager, handlers),
+ [ ensure_lager_handler_file_exist(Handler)
+ || Handler <- Handlers ],
+ Sinks = application:get_env(lager, extra_sinks, []),
+ ensure_extra_sinks_working(Sinks, list_expected_sinks()).
+
+ensure_extra_sinks_working(Sinks, [SinkName | Rest]) ->
+ case proplists:get_value(SinkName, Sinks) of
+ undefined -> throw({error, {cannot_log_to_file, unknown,
+ rabbit_log_lager_event_sink_undefined}});
+ Sink ->
+ SinkHandlers = proplists:get_value(handlers, Sink, []),
+ [ ensure_lager_handler_file_exist(Handler)
+ || Handler <- SinkHandlers ]
+ end,
+ ensure_extra_sinks_working(Sinks, Rest);
+ensure_extra_sinks_working(_Sinks, []) ->
+ ok.
+
+ensure_lager_handler_file_exist(Handler) ->
+ case lager_file_name(Handler) of
+ false -> ok;
+ FileName -> ensure_logfile_exist(FileName)
+ end.
+
+lager_file_name({lager_file_backend, Settings}) ->
+ lager_file_name1(Settings);
+lager_file_name(_) ->
+ false.
+
+lager_file_name1(Settings) when is_list(Settings) ->
+ {file, FileName} = proplists:lookup(file, Settings),
+ lager_util:expand_path(FileName);
+lager_file_name1({FileName, _}) -> lager_util:expand_path(FileName);
+lager_file_name1({FileName, _, _, _, _}) -> lager_util:expand_path(FileName);
+lager_file_name1(_) ->
+ throw({error, {cannot_log_to_file, unknown,
+ lager_file_backend_config_invalid}}).
+
+
+ensure_logfile_exist(FileName) ->
+ LogFile = lager_util:expand_path(FileName),
+ case rabbit_file:read_file_info(LogFile) of
+ {ok,_} -> ok;
+ {error, Err} -> throw({error, {cannot_log_to_file, LogFile, Err}})
+ end.
+
+ensure_lager_configured() ->
+ case lager_configured() of
+ false -> configure_lager();
+ true -> ok
+ end.
+
+%% Lager should have handlers and sinks
+%% Error logger forwarding to syslog should be disabled
+lager_configured() ->
+ Sinks = lager:list_all_sinks(),
+ ExpectedSinks = list_expected_sinks(),
+ application:get_env(lager, handlers) =/= undefined
+ andalso
+ lists:all(fun(S) -> lists:member(S, Sinks) end, ExpectedSinks)
+ andalso
+ application:get_env(syslog, syslog_error_logger) =/= undefined.
+
+configure_lager() ->
+ ok = app_utils:load_applications([lager]),
+ %% Turn off reformatting for error_logger messages
+ case application:get_env(lager, error_logger_redirect) of
+ undefined -> application:set_env(lager, error_logger_redirect, true);
+ _ -> ok
+ end,
+ case application:get_env(lager, error_logger_format_raw) of
+ undefined -> application:set_env(lager, error_logger_format_raw, true);
+ _ -> ok
+ end,
+ case application:get_env(lager, log_root) of
+ undefined ->
+ %% Setting env var to 'undefined' is different from not
+ %% setting it at all, and lager is sensitive to this
+ %% difference.
+ case application:get_env(rabbit, lager_log_root) of
+ {ok, Value} ->
+ ok = application:set_env(lager, log_root, Value);
+ _ ->
+ ok
+ end;
+ _ -> ok
+ end,
+ case application:get_env(lager, colored) of
+ undefined ->
+ UseColor = rabbit_prelaunch_early_logging:use_colored_logging(),
+ application:set_env(lager, colored, UseColor);
+ _ ->
+ ok
+ end,
+ %% Set rabbit.log config variable based on environment.
+ prepare_rabbit_log_config(),
+ %% Configure syslog library.
+ ok = configure_syslog_error_logger(),
+ %% At this point we should have rabbit.log application variable
+ %% configured to generate RabbitMQ log handlers.
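+    %% A sketch of that variable with hypothetical values (this is what
+    %% generate_lager_handlers/0 consumes below):
+    %%   {rabbit, [{log, [{file,    [{file, "rabbit.log"}, {level, info}]},
+    %%                    {console, [{enabled, true}]}]}]}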
+ GeneratedHandlers = generate_lager_handlers(),
+
+ %% If there are lager handlers configured,
+    %% both the lager handlers and the generated RabbitMQ handlers are used.
+ %% This is because it's hard to decide clear preference rules.
+ %% RabbitMQ handlers can be set to [] to use only lager handlers.
+ Handlers = case application:get_env(lager, handlers, undefined) of
+ undefined -> GeneratedHandlers;
+ LagerHandlers ->
+ %% Remove handlers generated in previous starts
+ FormerRabbitHandlers = application:get_env(lager, rabbit_handlers, []),
+ GeneratedHandlers ++ remove_rabbit_handlers(LagerHandlers,
+ FormerRabbitHandlers)
+ end,
+
+ ok = application:set_env(lager, handlers, Handlers),
+ ok = application:set_env(lager, rabbit_handlers, GeneratedHandlers),
+
+ %% Setup extra sink/handlers. If they are not configured, redirect
+ %% messages to the default sink. To know the list of expected extra
+ %% sinks, we look at the 'lager_extra_sinks' compilation option.
+ LogConfig = application:get_env(rabbit, log, []),
+ LogLevels = application:get_env(rabbit, log_levels, []),
+ Categories = proplists:get_value(categories, LogConfig, []),
+ CategoriesConfig0 = case {Categories, LogLevels} of
+ {[], []} -> [];
+ {[], LogLevels} ->
+ io:format("Using deprecated config parameter 'log_levels'. "
+ "Please update your configuration file according to "
+ "https://rabbitmq.com/logging.html"),
+ lists:map(fun({Name, Level}) -> {Name, [{level, Level}]} end,
+ LogLevels);
+ {Categories, []} ->
+ Categories;
+ {Categories, _} ->
+ io:format("Using the deprecated config parameter 'rabbit.log_levels' together "
+ "with a new parameter for log categories."
+ " 'rabbit.log_levels' will be ignored. Please remove it from the config. More at "
+ "https://rabbitmq.com/logging.html"),
+ Categories
+ end,
+ LogLevelsFromContext = case rabbit_prelaunch:get_context() of
+ #{log_levels := LL} -> LL;
+ _ -> undefined
+ end,
+ Fun = fun
+ (global, _, CC) ->
+ CC;
+ (color, _, CC) ->
+ CC;
+ (CategoryS, LogLevel, CC) ->
+ Category = list_to_atom(CategoryS),
+ CCEntry = proplists:get_value(
+ Category, CC, []),
+ CCEntry1 = lists:ukeymerge(
+ 1,
+ [{level, LogLevel}],
+ lists:ukeysort(1, CCEntry)),
+ lists:keystore(
+ Category, 1, CC, {Category, CCEntry1})
+ end,
+ CategoriesConfig = case LogLevelsFromContext of
+ undefined ->
+ CategoriesConfig0;
+ _ ->
+ maps:fold(Fun,
+ CategoriesConfig0,
+ LogLevelsFromContext)
+ end,
+ SinkConfigs = lists:map(
+ fun({Name, Config}) ->
+ {rabbit_log:make_internal_sink_name(Name), Config}
+ end,
+ CategoriesConfig),
+ LagerSinks = application:get_env(lager, extra_sinks, []),
+ GeneratedSinks = generate_lager_sinks(
+ [error_logger_lager_event | list_expected_sinks()],
+ SinkConfigs),
+ Sinks = merge_lager_sink_handlers(LagerSinks, GeneratedSinks, []),
+ ok = application:set_env(lager, extra_sinks, Sinks),
+
+ case application:get_env(lager, error_logger_hwm) of
+ undefined ->
+ ok = application:set_env(lager, error_logger_hwm, 1000),
+ % NB: 50 is the default value in lager.app.src
+ ok = application:set_env(lager, error_logger_hwm_original, 50);
+ {ok, Val} when is_integer(Val) andalso Val < 1000 ->
+ ok = application:set_env(lager, error_logger_hwm, 1000),
+ ok = application:set_env(lager, error_logger_hwm_original, Val);
+ {ok, Val} when is_integer(Val) ->
+ ok = application:set_env(lager, error_logger_hwm_original, Val),
+ ok
+ end,
+ ok.
+
+configure_syslog_error_logger() ->
+ %% Disable error_logger forwarding to syslog if it's not configured
+ case application:get_env(syslog, syslog_error_logger) of
+ undefined ->
+ application:set_env(syslog, syslog_error_logger, false);
+ _ -> ok
+ end.
+
+remove_rabbit_handlers(Handlers, FormerHandlers) ->
+ lists:filter(fun(Handler) ->
+ not lists:member(Handler, FormerHandlers)
+ end,
+ Handlers).
+
+generate_lager_handlers() ->
+ LogConfig = application:get_env(rabbit, log, []),
+ LogHandlersConfig = lists:keydelete(categories, 1, LogConfig),
+ generate_lager_handlers(LogHandlersConfig).
+
+generate_lager_handlers(LogHandlersConfig) ->
+ lists:flatmap(
+ fun
+ ({file, HandlerConfig}) ->
+ case proplists:get_value(file, HandlerConfig, false) of
+ false -> [];
+ FileName when is_list(FileName) ->
+ Backend = lager_backend(file),
+ generate_handler(Backend, HandlerConfig)
+ end;
+ ({Other, HandlerConfig}) when
+ Other =:= console; Other =:= syslog; Other =:= exchange ->
+ case proplists:get_value(enabled, HandlerConfig, false) of
+ false -> [];
+ true ->
+ Backend = lager_backend(Other),
+ generate_handler(Backend,
+ lists:keydelete(enabled, 1, HandlerConfig))
+ end
+ end,
+ LogHandlersConfig).
+
+lager_backend(file) -> lager_file_backend;
+lager_backend(console) -> lager_console_backend;
+lager_backend(syslog) -> syslog_lager_backend;
+lager_backend(exchange) -> lager_exchange_backend.
+
+%% Syslog backend is using an old API for configuration and
+%% does not support proplists.
+generate_handler(syslog_lager_backend=Backend, HandlerConfig) ->
+ %% The default log level is set to `debug` because the actual
+ %% filtering is made at the sink level. We want to accept all
+ %% messages here.
+ DefaultConfigVal = debug,
+ Level = proplists:get_value(level, HandlerConfig, DefaultConfigVal),
+ ok = configure_handler_backend(Backend),
+ [{Backend,
+ [Level,
+ {},
+ {lager_default_formatter, syslog_formatter_config()}]}];
+generate_handler(Backend, HandlerConfig) ->
+ [{Backend,
+ lists:ukeymerge(1, lists:ukeysort(1, HandlerConfig),
+ lists:ukeysort(1, default_handler_config(Backend)))}].
+
+configure_handler_backend(syslog_lager_backend) ->
+ {ok, _} = application:ensure_all_started(syslog),
+ ok;
+configure_handler_backend(_Backend) ->
+ ok.
+
+default_handler_config(lager_console_backend) ->
+ %% The default log level is set to `debug` because the actual
+ %% filtering is made at the sink level. We want to accept all
+ %% messages here.
+ DefaultConfigVal = debug,
+ [{level, DefaultConfigVal},
+ {formatter_config, default_config_value({formatter_config, console})}];
+default_handler_config(lager_exchange_backend) ->
+ %% The default log level is set to `debug` because the actual
+ %% filtering is made at the sink level. We want to accept all
+ %% messages here.
+ DefaultConfigVal = debug,
+ [{level, DefaultConfigVal},
+ {formatter_config, default_config_value({formatter_config, exchange})}];
+default_handler_config(lager_file_backend) ->
+ %% The default log level is set to `debug` because the actual
+ %% filtering is made at the sink level. We want to accept all
+ %% messages here.
+ DefaultConfigVal = debug,
+ [{level, DefaultConfigVal},
+ {formatter_config, default_config_value({formatter_config, file})},
+ {date, ""},
+ {size, 0}].
+
+default_config_value(level) ->
+ LogConfig = application:get_env(rabbit, log, []),
+ FoldFun = fun
+ ({_, Cfg}, LL) when is_list(Cfg) ->
+ NewLL = proplists:get_value(level, Cfg, LL),
+ case LL of
+ undefined ->
+ NewLL;
+ _ ->
+ MoreVerbose = lager_util:level_to_num(NewLL) > lager_util:level_to_num(LL),
+ case MoreVerbose of
+ true -> NewLL;
+ false -> LL
+ end
+ end;
+ (_, LL) ->
+ LL
+ end,
+ FoundLL = lists:foldl(FoldFun, undefined, LogConfig),
+ case FoundLL of
+ undefined -> info;
+ _ -> FoundLL
+ end;
+default_config_value({formatter_config, console}) ->
+ EOL = case application:get_env(lager, colored) of
+ {ok, true} -> "\e[0m\r\n";
+ _ -> "\r\n"
+ end,
+ [date, " ", time, " ", color, "[", severity, "] ",
+ {pid, ""},
+ " ", message, EOL];
+default_config_value({formatter_config, _}) ->
+ [date, " ", time, " ", color, "[", severity, "] ",
+ {pid, ""},
+ " ", message, "\n"].
+
+syslog_formatter_config() ->
+ [color, "[", severity, "] ",
+ {pid, ""},
+ " ", message, "\n"].
+
+prepare_rabbit_log_config() ->
+ %% If RABBIT_LOGS is not set, we should ignore it.
+ DefaultFile = application:get_env(rabbit, lager_default_file, undefined),
+ %% If RABBIT_UPGRADE_LOGS is not set, we should ignore it.
+ UpgradeFile = application:get_env(rabbit, lager_upgrade_file, undefined),
+ case DefaultFile of
+ undefined -> ok;
+ false ->
+ set_env_default_log_disabled();
+ tty ->
+ set_env_default_log_console();
+ FileName when is_list(FileName) ->
+ case rabbit_prelaunch:get_context() of
+ %% The user explicitly sets $RABBITMQ_LOGS;
+ %% we should override a file location even
+ %% if it's set in rabbitmq.config
+ #{var_origins := #{main_log_file := environment}} ->
+ set_env_default_log_file(FileName, override);
+ _ ->
+ set_env_default_log_file(FileName, keep)
+ end
+ end,
+
+ %% Upgrade log file never overrides the value set in rabbitmq.config
+ case UpgradeFile of
+ %% No special env for upgrade logs - redirect to the default sink
+ undefined -> ok;
+ %% Redirect logs to default output.
+ DefaultFile -> ok;
+ UpgradeFileName when is_list(UpgradeFileName) ->
+ set_env_upgrade_log_file(UpgradeFileName)
+ end.
+
+set_env_default_log_disabled() ->
+ %% Disabling all the logs.
+ ok = application:set_env(rabbit, log, []).
+
+set_env_default_log_console() ->
+ LogConfig = application:get_env(rabbit, log, []),
+ ConsoleConfig = proplists:get_value(console, LogConfig, []),
+ LogConfigConsole =
+ lists:keystore(console, 1, LogConfig,
+ {console, lists:keystore(enabled, 1, ConsoleConfig,
+ {enabled, true})}),
+ %% Remove the file handler - disable logging to file
+ LogConfigConsoleNoFile = lists:keydelete(file, 1, LogConfigConsole),
+ ok = application:set_env(rabbit, log, LogConfigConsoleNoFile).
+
+set_env_default_log_file(FileName, Override) ->
+ LogConfig = application:get_env(rabbit, log, []),
+ FileConfig = proplists:get_value(file, LogConfig, []),
+ NewLogConfig = case proplists:get_value(file, FileConfig, undefined) of
+ undefined ->
+ lists:keystore(file, 1, LogConfig,
+ {file, lists:keystore(file, 1, FileConfig,
+ {file, FileName})});
+ _ConfiguredFileName ->
+ case Override of
+ override ->
+ lists:keystore(
+ file, 1, LogConfig,
+ {file, lists:keystore(file, 1, FileConfig,
+ {file, FileName})});
+ keep ->
+ LogConfig
+ end
+ end,
+ ok = application:set_env(rabbit, log, NewLogConfig).
+
+set_env_upgrade_log_file(FileName) ->
+ LogConfig = application:get_env(rabbit, log, []),
+ SinksConfig = proplists:get_value(categories, LogConfig, []),
+ UpgradeSinkConfig = proplists:get_value(upgrade, SinksConfig, []),
+ FileConfig = proplists:get_value(file, SinksConfig, []),
+ NewLogConfig = case proplists:get_value(file, FileConfig, undefined) of
+ undefined ->
+ lists:keystore(
+ categories, 1, LogConfig,
+ {categories,
+ lists:keystore(
+ upgrade, 1, SinksConfig,
+ {upgrade,
+ lists:keystore(file, 1, UpgradeSinkConfig,
+ {file, FileName})})});
+                       %% No change. We don't want to override the configured value.
+ _File -> LogConfig
+ end,
+ ok = application:set_env(rabbit, log, NewLogConfig).
+
+generate_lager_sinks(SinkNames, SinkConfigs) ->
+ LogLevels = case rabbit_prelaunch:get_context() of
+ #{log_levels := LL} -> LL;
+ _ -> undefined
+ end,
+ DefaultLogLevel = case LogLevels of
+ #{global := LogLevel} ->
+ LogLevel;
+ _ ->
+ default_config_value(level)
+ end,
+ lists:map(fun(SinkName) ->
+ SinkConfig = proplists:get_value(SinkName, SinkConfigs, []),
+ SinkHandlers = case proplists:get_value(file, SinkConfig, false) of
+ %% If no file defined - forward everything to the default backend
+ false ->
+ ForwarderLevel = proplists:get_value(level,
+ SinkConfig,
+ DefaultLogLevel),
+ [{lager_forwarder_backend,
+ [lager_util:make_internal_sink_name(lager), ForwarderLevel]}];
+ %% If a file defined - add a file backend to handlers and remove all default file backends.
+ File ->
+ %% Use `debug` as a default handler to not override a handler level
+ Level = proplists:get_value(level, SinkConfig, DefaultLogLevel),
+ DefaultGeneratedHandlers = application:get_env(lager, rabbit_handlers, []),
+ SinkFileHandlers = case proplists:get_value(lager_file_backend, DefaultGeneratedHandlers, undefined) of
+ undefined ->
+ %% Create a new file handler.
+ %% `info` is a default level here.
+ FileLevel = proplists:get_value(level, SinkConfig, DefaultLogLevel),
+ generate_lager_handlers([{file, [{file, File}, {level, FileLevel}]}]);
+ FileHandler ->
+ %% Replace a filename in the handler
+ FileHandlerChanges = case handler_level_more_verbose(FileHandler, Level) of
+ true -> [{file, File}, {level, Level}];
+ false -> [{file, File}]
+ end,
+
+ [{lager_file_backend,
+ lists:ukeymerge(1, FileHandlerChanges,
+ lists:ukeysort(1, FileHandler))}]
+ end,
+ %% Remove all file handlers.
+ AllLagerHandlers = application:get_env(lager, handlers, []),
+ HandlersWithoutFile = lists:filter(
+ fun({lager_file_backend, _}) -> false;
+ ({_, _}) -> true
+ end,
+ AllLagerHandlers),
+ %% Set level for handlers which are more verbose.
+ %% We don't increase verbosity in sinks so it works like forwarder backend.
+ HandlersWithoutFileWithLevel = lists:map(fun({Name, Handler}) ->
+ case handler_level_more_verbose(Handler, Level) of
+ true -> {Name, lists:keystore(level, 1, Handler, {level, Level})};
+ false -> {Name, Handler}
+ end
+ end,
+ HandlersWithoutFile),
+
+ HandlersWithoutFileWithLevel ++ SinkFileHandlers
+ end,
+ {SinkName, [{handlers, SinkHandlers}, {rabbit_handlers, SinkHandlers}]}
+ end,
+ SinkNames).
+
+handler_level_more_verbose(Handler, Level) ->
+ HandlerLevel = proplists:get_value(level, Handler, default_config_value(level)),
+ lager_util:level_to_num(HandlerLevel) > lager_util:level_to_num(Level).
+
+merge_lager_sink_handlers([{Name, Sink} | RestSinks], GeneratedSinks, Agg) ->
+ %% rabbitmq/rabbitmq-server#2044.
+ %% We have to take into account that a sink's
+ %% handler backend may need additional configuration here.
+ %% {rabbit_log_federation_lager_event, [
+ %% {handlers, [
+ %% {lager_forwarder_backend, [lager_event,inherit]},
+ %% {syslog_lager_backend, [debug]}
+ %% ]},
+ %% {rabbit_handlers, [
+ %% {lager_forwarder_backend, [lager_event,inherit]}
+ %% ]}
+ %% ]}
+ case lists:keytake(Name, 1, GeneratedSinks) of
+ {value, {Name, GenSink}, RestGeneratedSinks} ->
+ Handlers = proplists:get_value(handlers, Sink, []),
+ GenHandlers = proplists:get_value(handlers, GenSink, []),
+ FormerRabbitHandlers = proplists:get_value(rabbit_handlers, Sink, []),
+
+ %% Remove handlers defined in previous starts
+ ConfiguredHandlers = remove_rabbit_handlers(Handlers, FormerRabbitHandlers),
+ NewHandlers = GenHandlers ++ ConfiguredHandlers,
+ ok = maybe_configure_handler_backends(NewHandlers),
+ MergedSink = lists:keystore(rabbit_handlers, 1,
+ lists:keystore(handlers, 1, Sink,
+ {handlers, NewHandlers}),
+ {rabbit_handlers, GenHandlers}),
+ merge_lager_sink_handlers(
+ RestSinks,
+ RestGeneratedSinks,
+ [{Name, MergedSink} | Agg]);
+ false ->
+ merge_lager_sink_handlers(
+ RestSinks,
+ GeneratedSinks,
+ [{Name, Sink} | Agg])
+ end;
+merge_lager_sink_handlers([], GeneratedSinks, Agg) -> GeneratedSinks ++ Agg.
+
+maybe_configure_handler_backends([]) ->
+ ok;
+maybe_configure_handler_backends([{Backend, _}|Backends]) ->
+ ok = configure_handler_backend(Backend),
+ maybe_configure_handler_backends(Backends).
+
+list_expected_sinks() ->
+ rabbit_prelaunch_early_logging:list_expected_sinks().
+
+maybe_remove_logger_handler() ->
+ M = logger,
+ F = remove_handler,
+ try
+ ok = erlang:apply(M, F, [default])
+ catch
+ error:undef ->
+ % OK since the logger module only exists in OTP 21.1 or later
+ ok;
+ error:{badmatch, {error, {not_found, default}}} ->
+ % OK - this error happens when running a CLI command
+ ok;
+ Err:Reason ->
+ error_logger:error_msg("calling ~p:~p failed: ~p:~p~n",
+ [M, F, Err, Reason])
+ end.
+
+get_most_verbose_log_level() ->
+ {ok, HandlersA} = application:get_env(lager, handlers),
+ {ok, ExtraSinks} = application:get_env(lager, extra_sinks),
+ HandlersB = lists:append(
+ [H || {_, Keys} <- ExtraSinks,
+ {handlers, H} <- Keys]),
+ get_most_verbose_log_level(HandlersA ++ HandlersB,
+ lager_util:level_to_num(none)).
+
+get_most_verbose_log_level([{_, Props} | Rest], MostVerbose) ->
+ LogLevel = proplists:get_value(level, Props, info),
+ LogLevelNum = lager_util:level_to_num(LogLevel),
+ case LogLevelNum > MostVerbose of
+ true ->
+ get_most_verbose_log_level(Rest, LogLevelNum);
+ false ->
+ get_most_verbose_log_level(Rest, MostVerbose)
+ end;
+get_most_verbose_log_level([], MostVerbose) ->
+ lager_util:num_to_level(MostVerbose).
diff --git a/deps/rabbit/src/rabbit_limiter.erl b/deps/rabbit/src/rabbit_limiter.erl
new file mode 100644
index 0000000000..d3803957d3
--- /dev/null
+++ b/deps/rabbit/src/rabbit_limiter.erl
@@ -0,0 +1,448 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% The purpose of the limiter is to stem the flow of messages from
+%% queues to channels, in order to act upon various protocol-level
+%% flow control mechanisms, specifically AMQP 0-9-1's basic.qos
+%% prefetch_count, our consumer prefetch extension, and AMQP 1.0's
+%% link (aka consumer) credit mechanism.
+%%
+%% Each channel has an associated limiter process, created with
+%% start_link/1, which it passes to queues on consumer creation with
+%% rabbit_amqqueue:basic_consume/10, and rabbit_amqqueue:basic_get/4.
+%% The latter isn't strictly necessary, since basic.get is not
+%% subject to limiting, but it means that whenever a queue knows about
+%% a channel, it also knows about its limiter, which is less fiddly.
+%%
+%% The limiter process holds state that is, in effect, shared between
+%% the channel and all queues from which the channel is
+%% consuming. Essentially all these queues are competing for access to
+%% a single, limited resource - the ability to deliver messages via
+%% the channel - and it is the job of the limiter process to mediate
+%% that access.
+%%
+%% The limiter process is separate from the channel process for two
+%% reasons: separation of concerns, and efficiency. Channels can get
+%% very busy, particularly if they are also dealing with publishes.
+%% With a separate limiter process all the aforementioned access
+%% mediation can take place without touching the channel.
+%%
+%% For efficiency, both the channel and the queues keep some local
+%% state, initialised from the limiter pid with new/1 and client/1,
+%% respectively. In particular this allows them to avoid any
+%% interaction with the limiter process when it is 'inactive', i.e. no
+%% protocol-level flow control is taking place.
+%%
+%% This optimisation does come at the cost of some complexity though:
+%% when a limiter becomes active, the channel needs to inform all its
+%% consumer queues of this change in status. It does this by invoking
+%% rabbit_amqqueue:activate_limit_all/2. Note that there is no inverse
+%% transition, i.e. once a queue has been told about an active
+%% limiter, it is not subsequently told when that limiter becomes
+%% inactive. In practice it is rare for that to happen, though we
+%% could optimise this case in the future.
+%%
+%% Consumer credit (for AMQP 1.0) and per-consumer prefetch (for AMQP
+%% 0-9-1) are treated as essentially the same thing, but with the
+%% exception that per-consumer prefetch gets an auto-topup when
+%% acknowledgments come in.
+%%
+%% The bookkeeping for this is local to queues, so it is not necessary
+%% to store information about it in the limiter process. But for
+%% abstraction we hide it from the queue behind the limiter API, and
+%% it therefore becomes part of the queue local state.
+%%
+%% The interactions with the limiter are as follows:
+%%
+%% 1. Channels tell the limiter about basic.qos prefetch counts -
+%% that's what the limit_prefetch/3, unlimit_prefetch/1,
+%% get_prefetch_limit/1 API functions are about. They also tell the
+%% limiter queue state (via the queue) about consumer credit
+%% changes and message acknowledgement - that's what credit/5 and
+%% ack_from_queue/3 are for.
+%%
+%% 2. Queues also tell the limiter queue state about the queue
+%% becoming empty (via drained/1) and consumers leaving (via
+%% forget_consumer/2).
+%%
+%% 3. Queues register with the limiter - this happens as part of
+%% activate/1.
+%%
+%% 4. The limiter process maintains an internal counter of 'messages
+%% sent but not yet acknowledged', called the 'volume'.
+%%
+%% 5. Queues ask the limiter for permission (with can_send/3) whenever
+%% they want to deliver a message to a channel. The limiter checks
+%% whether a) the volume has not yet reached the prefetch limit,
+%% and b) whether the consumer has enough credit. If so it
+%% increments the volume and tells the queue to proceed. Otherwise
+%% it marks the queue as requiring notification (see below) and
+%% tells the queue not to proceed.
+%%
+%% 6. A queue that has been told to proceed (by the return value of
+%% can_send/3) sends the message to the channel. Conversely, a
+%% queue that has been told not to proceed, will not attempt to
+%% deliver that message, or any future messages, to the
+%% channel. This is accomplished by can_send/3 capturing the
+%% outcome in the local state, where it can be accessed with
+%% is_suspended/1.
+%%
+%% 7. When a channel receives an ack it tells the limiter (via ack/2)
+%% how many messages were ack'ed. The limiter process decrements
+%% the volume and if it falls below the prefetch_count then it
+%% notifies (through rabbit_amqqueue:resume/2) all the queues
+%% requiring notification, i.e. all those that had a can_send/3
+%% request denied.
+%%
+%% 8. Upon receipt of such a notification, queues resume delivery to
+%% the channel, i.e. they will once again start asking limiter, as
+%% described in (5).
+%%
+%% 9. When a queue has no more consumers associated with a particular
+%% channel, it deactivates use of the limiter with deactivate/1,
+%% which alters the local state such that no further interactions
+%% with the limiter process take place until a subsequent
+%% activate/1.
+
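+%% A minimal usage sketch of the two sides of this API (process and variable
+%% names are placeholders; error handling omitted):
+%%
+%%   %% channel side
+%%   {ok, LimPid} = rabbit_limiter:start_link(my_limiter),
+%%   LState0 = rabbit_limiter:new(LimPid),
+%%   LState1 = rabbit_limiter:limit_prefetch(LState0, 10, 0),
+%%   ok = rabbit_limiter:ack(LState1, 1),
+%%
+%%   %% queue side
+%%   QState0 = rabbit_limiter:activate(rabbit_limiter:client(LimPid)),
+%%   case rabbit_limiter:can_send(QState0, true, CTag) of
+%%       {continue, QState1} -> {deliver, QState1};
+%%       {suspend,  QState1} -> {wait_for_resume, QState1}
+%%   end.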
+-module(rabbit_limiter).
+
+-include("rabbit.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/1]).
+%% channel API
+-export([new/1, limit_prefetch/3, unlimit_prefetch/1, is_active/1,
+ get_prefetch_limit/1, ack/2, pid/1]).
+%% queue API
+-export([client/1, activate/1, can_send/3, resume/1, deactivate/1,
+ is_suspended/1, is_consumer_blocked/2, credit/5, ack_from_queue/3,
+ drained/1, forget_consumer/2]).
+%% callbacks
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, prioritise_call/4]).
+
+%%----------------------------------------------------------------------------
+
+-record(lstate, {pid, prefetch_limited}).
+-record(qstate, {pid, state, credits}).
+
+-type lstate() :: #lstate{pid :: pid(),
+ prefetch_limited :: boolean()}.
+-type qstate() :: #qstate{pid :: pid(),
+ state :: 'dormant' | 'active' | 'suspended'}.
+
+-type credit_mode() :: 'manual' | 'drain' | 'auto'.
+
+%%----------------------------------------------------------------------------
+
+-record(lim, {prefetch_count = 0,
+ ch_pid,
+ %% 'Notify' is a boolean that indicates whether a queue should be
+ %% notified of a change in the limit or volume that may allow it to
+ %% deliver more messages via the limiter's channel.
+ queues = maps:new(), % QPid -> {MonitorRef, Notify}
+ volume = 0}).
+
+%% mode is of type credit_mode()
+-record(credit, {credit = 0, mode}).
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+-spec start_link(rabbit_types:proc_name()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(ProcName) -> gen_server2:start_link(?MODULE, [ProcName], []).
+
+-spec new(pid()) -> lstate().
+
+new(Pid) ->
+ %% this is a 'call' to ensure that it is invoked at most once.
+ ok = gen_server:call(Pid, {new, self()}, infinity),
+ #lstate{pid = Pid, prefetch_limited = false}.
+
+-spec limit_prefetch(lstate(), non_neg_integer(), non_neg_integer()) ->
+ lstate().
+
+limit_prefetch(L, PrefetchCount, UnackedCount) when PrefetchCount > 0 ->
+ ok = gen_server:call(
+ L#lstate.pid,
+ {limit_prefetch, PrefetchCount, UnackedCount}, infinity),
+ L#lstate{prefetch_limited = true}.
+
+-spec unlimit_prefetch(lstate()) -> lstate().
+
+unlimit_prefetch(L) ->
+ ok = gen_server:call(L#lstate.pid, unlimit_prefetch, infinity),
+ L#lstate{prefetch_limited = false}.
+
+-spec is_active(lstate()) -> boolean().
+
+is_active(#lstate{prefetch_limited = Limited}) -> Limited.
+
+-spec get_prefetch_limit(lstate()) -> non_neg_integer().
+
+get_prefetch_limit(#lstate{prefetch_limited = false}) -> 0;
+get_prefetch_limit(L) ->
+ gen_server:call(L#lstate.pid, get_prefetch_limit, infinity).
+
+-spec ack(lstate(), non_neg_integer()) -> 'ok'.
+
+ack(#lstate{prefetch_limited = false}, _AckCount) -> ok;
+ack(L, AckCount) -> gen_server:cast(L#lstate.pid, {ack, AckCount}).
+
+-spec pid(lstate()) -> pid().
+
+pid(#lstate{pid = Pid}) -> Pid.
+
+-spec client(pid()) -> qstate().
+
+client(Pid) -> #qstate{pid = Pid, state = dormant, credits = gb_trees:empty()}.
+
+-spec activate(qstate()) -> qstate().
+
+activate(L = #qstate{state = dormant}) ->
+ ok = gen_server:cast(L#qstate.pid, {register, self()}),
+ L#qstate{state = active};
+activate(L) -> L.
+
+-spec can_send(qstate(), boolean(), rabbit_types:ctag()) ->
+ {'continue' | 'suspend', qstate()}.
+
+can_send(L = #qstate{pid = Pid, state = State, credits = Credits},
+ AckRequired, CTag) ->
+ case is_consumer_blocked(L, CTag) of
+ false -> case (State =/= active orelse
+ safe_call(Pid, {can_send, self(), AckRequired}, true)) of
+ true -> Credits1 = decrement_credit(CTag, Credits),
+ {continue, L#qstate{credits = Credits1}};
+ false -> {suspend, L#qstate{state = suspended}}
+ end;
+ true -> {suspend, L}
+ end.
+
+safe_call(Pid, Msg, ExitValue) ->
+ rabbit_misc:with_exit_handler(
+ fun () -> ExitValue end,
+ fun () -> gen_server2:call(Pid, Msg, infinity) end).
+
+-spec resume(qstate()) -> qstate().
+
+resume(L = #qstate{state = suspended}) ->
+ L#qstate{state = active};
+resume(L) -> L.
+
+-spec deactivate(qstate()) -> qstate().
+
+deactivate(L = #qstate{state = dormant}) -> L;
+deactivate(L) ->
+ ok = gen_server:cast(L#qstate.pid, {unregister, self()}),
+ L#qstate{state = dormant}.
+
+-spec is_suspended(qstate()) -> boolean().
+
+is_suspended(#qstate{state = suspended}) -> true;
+is_suspended(#qstate{}) -> false.
+
+-spec is_consumer_blocked(qstate(), rabbit_types:ctag()) -> boolean().
+
+is_consumer_blocked(#qstate{credits = Credits}, CTag) ->
+ case gb_trees:lookup(CTag, Credits) of
+ none -> false;
+ {value, #credit{credit = C}} when C > 0 -> false;
+ {value, #credit{}} -> true
+ end.
+
+-spec credit
+ (qstate(), rabbit_types:ctag(), non_neg_integer(), credit_mode(),
+ boolean()) ->
+ {boolean(), qstate()}.
+
+credit(Limiter = #qstate{credits = Credits}, CTag, Crd, Mode, IsEmpty) ->
+ {Res, Cr} =
+ case IsEmpty andalso Mode =:= drain of
+ true -> {true, #credit{credit = 0, mode = manual}};
+ false -> {false, #credit{credit = Crd, mode = Mode}}
+ end,
+ {Res, Limiter#qstate{credits = enter_credit(CTag, Cr, Credits)}}.
+
+-spec ack_from_queue(qstate(), rabbit_types:ctag(), non_neg_integer()) ->
+ {boolean(), qstate()}.
+
+ack_from_queue(Limiter = #qstate{credits = Credits}, CTag, Credit) ->
+ {Credits1, Unblocked} =
+ case gb_trees:lookup(CTag, Credits) of
+ {value, C = #credit{mode = auto, credit = C0}} ->
+ {update_credit(CTag, C#credit{credit = C0 + Credit}, Credits),
+ C0 =:= 0 andalso Credit =/= 0};
+ _ ->
+ {Credits, false}
+ end,
+ {Unblocked, Limiter#qstate{credits = Credits1}}.
+
+-spec drained(qstate()) ->
+ {[{rabbit_types:ctag(), non_neg_integer()}], qstate()}.
+
+drained(Limiter = #qstate{credits = Credits}) ->
+ Drain = fun(C) -> C#credit{credit = 0, mode = manual} end,
+ {CTagCredits, Credits2} =
+ rabbit_misc:gb_trees_fold(
+ fun (CTag, C = #credit{credit = Crd, mode = drain}, {Acc, Creds0}) ->
+ {[{CTag, Crd} | Acc], update_credit(CTag, Drain(C), Creds0)};
+ (_CTag, #credit{credit = _Crd, mode = _Mode}, {Acc, Creds0}) ->
+ {Acc, Creds0}
+ end, {[], Credits}, Credits),
+ {CTagCredits, Limiter#qstate{credits = Credits2}}.
+
+-spec forget_consumer(qstate(), rabbit_types:ctag()) -> qstate().
+
+forget_consumer(Limiter = #qstate{credits = Credits}, CTag) ->
+ Limiter#qstate{credits = gb_trees:delete_any(CTag, Credits)}.
+
+%%----------------------------------------------------------------------------
+%% Queue-local code
+%%----------------------------------------------------------------------------
+
+%% We want to do all the AMQP 1.0-ish link level credit calculations
+%% in the queue (to do them elsewhere introduces a ton of
+%% races). However, it's a big chunk of code that is conceptually very
+%% linked to the limiter concept. So we get the queue to hold a bit of
+%% state for us (#qstate.credits), and maintain a fiction that the
+%% limiter is making the decisions...
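+%%
+%% For example (hypothetical values, starting from Q0 = client(LimiterPid)):
+%% a consumer granted a single unit of credit in 'auto' mode is blocked
+%% after one delivery and unblocked again once that delivery is acked:
+%%
+%%     {false, Q1}    = credit(Q0, CTag, 1, auto, false),
+%%     false          = is_consumer_blocked(Q1, CTag),
+%%     {continue, Q2} = can_send(Q1, true, CTag),
+%%     true           = is_consumer_blocked(Q2, CTag),
+%%     {true, Q3}     = ack_from_queue(Q2, CTag, 1),
+%%     false          = is_consumer_blocked(Q3, CTag).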
+
+decrement_credit(CTag, Credits) ->
+ case gb_trees:lookup(CTag, Credits) of
+ {value, C = #credit{credit = Credit}} ->
+ update_credit(CTag, C#credit{credit = Credit - 1}, Credits);
+ none ->
+ Credits
+ end.
+
+enter_credit(CTag, C, Credits) ->
+ gb_trees:enter(CTag, ensure_credit_invariant(C), Credits).
+
+update_credit(CTag, C, Credits) ->
+ gb_trees:update(CTag, ensure_credit_invariant(C), Credits).
+
+ensure_credit_invariant(C = #credit{credit = 0, mode = drain}) ->
+ %% Using up all credit implies no need to send a 'drained' event
+ C#credit{mode = manual};
+ensure_credit_invariant(C) ->
+ C.
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+init([ProcName]) -> ?store_proc_name(ProcName),
+ ?LG_PROCESS_TYPE(limiter),
+ {ok, #lim{}}.
+
+prioritise_call(get_prefetch_limit, _From, _Len, _State) -> 9;
+prioritise_call(_Msg, _From, _Len, _State) -> 0.
+
+handle_call({new, ChPid}, _From, State = #lim{ch_pid = undefined}) ->
+ {reply, ok, State#lim{ch_pid = ChPid}};
+
+handle_call({limit_prefetch, PrefetchCount, UnackedCount}, _From,
+ State = #lim{prefetch_count = 0}) ->
+ {reply, ok, maybe_notify(State, State#lim{prefetch_count = PrefetchCount,
+ volume = UnackedCount})};
+handle_call({limit_prefetch, PrefetchCount, _UnackedCount}, _From, State) ->
+ {reply, ok, maybe_notify(State, State#lim{prefetch_count = PrefetchCount})};
+
+handle_call(unlimit_prefetch, _From, State) ->
+ {reply, ok, maybe_notify(State, State#lim{prefetch_count = 0,
+ volume = 0})};
+
+handle_call(get_prefetch_limit, _From,
+ State = #lim{prefetch_count = PrefetchCount}) ->
+ {reply, PrefetchCount, State};
+
+handle_call({can_send, QPid, AckRequired}, _From,
+ State = #lim{volume = Volume}) ->
+ case prefetch_limit_reached(State) of
+ true -> {reply, false, limit_queue(QPid, State)};
+ false -> {reply, true, State#lim{volume = if AckRequired -> Volume + 1;
+ true -> Volume
+ end}}
+ end.
+
+handle_cast({ack, Count}, State = #lim{volume = Volume}) ->
+ NewVolume = if Volume == 0 -> 0;
+ true -> Volume - Count
+ end,
+ {noreply, maybe_notify(State, State#lim{volume = NewVolume})};
+
+handle_cast({register, QPid}, State) ->
+ {noreply, remember_queue(QPid, State)};
+
+handle_cast({unregister, QPid}, State) ->
+ {noreply, forget_queue(QPid, State)}.
+
+handle_info({'DOWN', _MonitorRef, _Type, QPid, _Info}, State) ->
+ {noreply, forget_queue(QPid, State)}.
+
+terminate(_, _) ->
+ ok.
+
+code_change(_, State, _) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing
+%%----------------------------------------------------------------------------
+
+maybe_notify(OldState, NewState) ->
+ case prefetch_limit_reached(OldState) andalso
+ not prefetch_limit_reached(NewState) of
+ true -> notify_queues(NewState);
+ false -> NewState
+ end.
+
+prefetch_limit_reached(#lim{prefetch_count = Limit, volume = Volume}) ->
+ Limit =/= 0 andalso Volume >= Limit.
+
+remember_queue(QPid, State = #lim{queues = Queues}) ->
+ case maps:is_key(QPid, Queues) of
+ false -> MRef = erlang:monitor(process, QPid),
+ State#lim{queues = maps:put(QPid, {MRef, false}, Queues)};
+ true -> State
+ end.
+
+forget_queue(QPid, State = #lim{queues = Queues}) ->
+ case maps:find(QPid, Queues) of
+ {ok, {MRef, _}} -> true = erlang:demonitor(MRef),
+ State#lim{queues = maps:remove(QPid, Queues)};
+ error -> State
+ end.
+
+limit_queue(QPid, State = #lim{queues = Queues}) ->
+ UpdateFun = fun ({MRef, _}) -> {MRef, true} end,
+ State#lim{queues = maps:update_with(QPid, UpdateFun, Queues)}.
+
+notify_queues(State = #lim{ch_pid = ChPid, queues = Queues}) ->
+ {QList, NewQueues} =
+ maps:fold(fun (_QPid, {_, false}, Acc) -> Acc;
+ (QPid, {MRef, true}, {L, D}) ->
+ {[QPid | L], maps:put(QPid, {MRef, false}, D)}
+ end, {[], Queues}, Queues),
+ case length(QList) of
+ 0 -> ok;
+ 1 -> ok = rabbit_amqqueue:resume(hd(QList), ChPid); %% common case
+ L ->
+ %% We randomly vary the position of queues in the list,
+ %% thus ensuring that each queue has an equal chance of
+ %% being notified first.
+ {L1, L2} = lists:split(rand:uniform(L), QList),
+ [[ok = rabbit_amqqueue:resume(Q, ChPid) || Q <- L3]
+ || L3 <- [L2, L1]],
+ ok
+ end,
+ State#lim{queues = NewQueues}.
diff --git a/deps/rabbit/src/rabbit_log_tail.erl b/deps/rabbit/src/rabbit_log_tail.erl
new file mode 100644
index 0000000000..c3faad07fc
--- /dev/null
+++ b/deps/rabbit/src/rabbit_log_tail.erl
@@ -0,0 +1,102 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_log_tail).
+
+-export([tail_n_lines/2]).
+-export([init_tail_stream/4]).
+
+-define(GUESS_OFFSET, 200).
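+
+%% Example usage (a sketch; the log path and line count are illustrative):
+%%
+%%     Lines = rabbit_log_tail:tail_n_lines("/var/log/rabbitmq/rabbit.log", 50),
+%%     %% Lines is a list of binaries (oldest first) on success, or an
+%%     %% {error, Reason} tuple if the file could not be opened.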
+
+init_tail_stream(Filename, Pid, Ref, Duration) ->
+ RPCProc = self(),
+ Reader = spawn(fun() ->
+ link(Pid),
+ case file:open(Filename, [read, binary]) of
+ {ok, File} ->
+ TimeLimit = case Duration of
+ infinity -> infinity;
+ _ -> erlang:system_time(second) + Duration
+ end,
+ {ok, _} = file:position(File, eof),
+ RPCProc ! {Ref, opened},
+ read_loop(File, Pid, Ref, TimeLimit);
+ {error, _} = Err ->
+ RPCProc ! {Ref, Err}
+ end
+ end),
+ receive
+ {Ref, opened} -> {ok, Ref};
+ {Ref, {error, Err}} -> {error, Err}
+ after 5000 ->
+ exit(Reader, timeout),
+ {error, timeout}
+ end.
+
+read_loop(File, Pid, Ref, TimeLimit) ->
+ case is_integer(TimeLimit) andalso erlang:system_time(second) > TimeLimit of
+ true -> Pid ! {Ref, <<>>, finished};
+ false ->
+ case file:read(File, ?GUESS_OFFSET) of
+ {ok, Data} ->
+ Pid ! {Ref, Data, confinue},
+ read_loop(File, Pid, Ref, TimeLimit);
+ eof ->
+ timer:sleep(1000),
+ read_loop(File, Pid, Ref, TimeLimit);
+ {error, _} = Err ->
+ Pid ! {Ref, Err, finished}
+ end
+ end.
+
+tail_n_lines(Filename, N) ->
+ case file:open(Filename, [read, binary]) of
+ {ok, File} ->
+ {ok, Eof} = file:position(File, eof),
+ %% Eof may move. Only read up to the current one.
+ Result = reverse_read_n_lines(N, N, File, Eof, Eof),
+ file:close(File),
+ Result;
+ {error, _} = Error -> Error
+ end.
+
+reverse_read_n_lines(N, OffsetN, File, Position, Eof) ->
+ GuessPosition = offset(Position, OffsetN),
+ case read_lines_from_position(File, GuessPosition, Eof) of
+ {ok, Lines} ->
+ NLines = length(Lines),
+ case {NLines >= N, GuessPosition == 0} of
+ %% Take only N lines if there are more
+ {true, _} -> lists:nthtail(NLines - N, Lines);
+ %% Safe to assume that NLines is less than N
+ {_, true} -> Lines;
+ %% Adjust position
+ _ ->
+ reverse_read_n_lines(N, N - NLines + 1, File, GuessPosition, Eof)
+ end;
+ {error, _} = Error -> Error
+ end.
+
+read_from_position(File, GuessPosition, Eof) ->
+ file:pread(File, GuessPosition, max(0, Eof - GuessPosition)).
+
+read_lines_from_position(File, GuessPosition, Eof) ->
+ case read_from_position(File, GuessPosition, Eof) of
+ {ok, Data} ->
+ Lines = binary:split(Data, <<"\n">>, [global, trim]),
+ case {GuessPosition, Lines} of
+ %% If position is 0 - there are no partial lines
+ {0, _} -> {ok, Lines};
+ %% Remove first line as it can be partial
+ {_, [_ | Rest]} -> {ok, Rest};
+ {_, []} -> {ok, []}
+ end;
+ {error, _} = Error -> Error
+ end.
+
+offset(Base, N) ->
+ max(0, Base - N * ?GUESS_OFFSET).
diff --git a/deps/rabbit/src/rabbit_looking_glass.erl b/deps/rabbit/src/rabbit_looking_glass.erl
new file mode 100644
index 0000000000..00b1b6d46b
--- /dev/null
+++ b/deps/rabbit/src/rabbit_looking_glass.erl
@@ -0,0 +1,48 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_looking_glass).
+
+-ignore_xref([{lg, trace, 4}]).
+-ignore_xref([{maps, from_list, 1}]).
+
+-export([boot/0]).
+-export([connections/0]).
+
+boot() ->
+ case os:getenv("RABBITMQ_TRACER") of
+ false ->
+ ok;
+ Value ->
+ Input = parse_value(Value),
+ rabbit_log:info(
+ "Enabling Looking Glass profiler, input value: ~p",
+ [Input]
+ ),
+ {ok, _} = application:ensure_all_started(looking_glass),
+ lg:trace(
+ Input,
+ lg_file_tracer,
+ "traces.lz4",
+ maps:from_list([
+ {mode, profile},
+ {process_dump, true},
+ {running, true},
+ {send, true}]
+ )
+ )
+ end.
+
+parse_value(Value) ->
+ [begin
+ [Mod, Fun] = string:tokens(C, ":"),
+ {callback, list_to_atom(Mod), list_to_atom(Fun)}
+ end || C <- string:tokens(Value, ",")].
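+
+%% For example, starting the node with (an illustrative value)
+%%
+%%     RABBITMQ_TRACER="rabbit_looking_glass:connections"
+%%
+%% makes parse_value/1 return [{callback, rabbit_looking_glass, connections}],
+%% pointing lg:trace/4 at the connection supervisor pids gathered by
+%% connections/0 below.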
+
+connections() ->
+ Pids = [Pid || {{conns_sup, _}, Pid} <- ets:tab2list(ranch_server)],
+ ['_', {scope, Pids}].
diff --git a/deps/rabbit/src/rabbit_maintenance.erl b/deps/rabbit/src/rabbit_maintenance.erl
new file mode 100644
index 0000000000..e5434dc888
--- /dev/null
+++ b/deps/rabbit/src/rabbit_maintenance.erl
@@ -0,0 +1,354 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_maintenance).
+
+-include("rabbit.hrl").
+
+-export([
+ is_enabled/0,
+ drain/0,
+ revive/0,
+ mark_as_being_drained/0,
+ unmark_as_being_drained/0,
+ is_being_drained_local_read/1,
+ is_being_drained_consistent_read/1,
+ status_local_read/1,
+ status_consistent_read/1,
+ filter_out_drained_nodes_local_read/1,
+ filter_out_drained_nodes_consistent_read/1,
+ suspend_all_client_listeners/0,
+ resume_all_client_listeners/0,
+ close_all_client_connections/0,
+ primary_replica_transfer_candidate_nodes/0,
+ random_primary_replica_transfer_candidate_node/1,
+ transfer_leadership_of_quorum_queues/1,
+ transfer_leadership_of_classic_mirrored_queues/1,
+ status_table_name/0,
+ status_table_definition/0
+]).
+
+-define(TABLE, rabbit_node_maintenance_states).
+-define(FEATURE_FLAG, maintenance_mode_status).
+-define(DEFAULT_STATUS, regular).
+-define(DRAINING_STATUS, draining).
+
+-type maintenance_status() :: ?DEFAULT_STATUS | ?DRAINING_STATUS.
+-type mnesia_table() :: atom().
+
+-export_type([
+ maintenance_status/0
+]).
+
+%%
+%% API
+%%
+
+-spec status_table_name() -> mnesia_table().
+status_table_name() ->
+ ?TABLE.
+
+-spec status_table_definition() -> list().
+status_table_definition() ->
+ maps:to_list(#{
+ record_name => node_maintenance_state,
+ attributes => record_info(fields, node_maintenance_state)
+ }).
+
+-spec is_enabled() -> boolean().
+is_enabled() ->
+ rabbit_feature_flags:is_enabled(?FEATURE_FLAG).
+
+-spec drain() -> ok.
+drain() ->
+ case is_enabled() of
+ true -> do_drain();
+ false -> rabbit_log:warning("Feature flag `~s` is not enabled, draining is a no-op", [?FEATURE_FLAG])
+ end.
+
+-spec do_drain() -> ok.
+do_drain() ->
+ rabbit_log:alert("This node is being put into maintenance (drain) mode"),
+ mark_as_being_drained(),
+ rabbit_log:info("Marked this node as undergoing maintenance"),
+ suspend_all_client_listeners(),
+ rabbit_log:alert("Suspended all listeners and will no longer accept client connections"),
+ {ok, NConnections} = close_all_client_connections(),
+ %% allow plugins to react e.g. by closing their protocol connections
+ rabbit_event:notify(maintenance_connections_closed, #{
+ reason => <<"node is being put into maintenance">>
+ }),
+ rabbit_log:alert("Closed ~b local client connections", [NConnections]),
+
+ TransferCandidates = primary_replica_transfer_candidate_nodes(),
+ ReadableCandidates = readable_candidate_list(TransferCandidates),
+ rabbit_log:info("Node will transfer primary replicas of its queues to ~b peers: ~s",
+ [length(TransferCandidates), ReadableCandidates]),
+ transfer_leadership_of_classic_mirrored_queues(TransferCandidates),
+ transfer_leadership_of_quorum_queues(TransferCandidates),
+ stop_local_quorum_queue_followers(),
+
+ %% allow plugins to react
+ rabbit_event:notify(maintenance_draining, #{
+ reason => <<"node is being put into maintenance">>
+ }),
+ rabbit_log:alert("Node is ready to be shut down for maintenance or upgrade"),
+
+ ok.
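+
+%% drain/0 and revive/0 are typically invoked remotely (e.g. by CLI tooling).
+%% A minimal sketch, with a hypothetical node name:
+%%
+%%     rpc:call('rabbit@host1', rabbit_maintenance, drain, []),
+%%     %% ... perform the maintenance, then, once the node is back ...
+%%     rpc:call('rabbit@host1', rabbit_maintenance, revive, []).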
+
+-spec revive() -> ok.
+revive() ->
+ case is_enabled() of
+ true -> do_revive();
+ false -> rabbit_log:warning("Feature flag `~s` is not enabled, reviving is a no-op", [?FEATURE_FLAG])
+ end.
+
+-spec do_revive() -> ok.
+do_revive() ->
+ rabbit_log:alert("This node is being revived from maintenance (drain) mode"),
+ revive_local_quorum_queue_replicas(),
+ rabbit_log:alert("Resumed all listeners and will accept client connections again"),
+ resume_all_client_listeners(),
+ rabbit_log:alert("Resumed all listeners and will accept client connections again"),
+ unmark_as_being_drained(),
+ rabbit_log:info("Marked this node as back from maintenance and ready to serve clients"),
+
+ %% allow plugins to react
+ rabbit_event:notify(maintenance_revived, #{}),
+
+ ok.
+
+-spec mark_as_being_drained() -> boolean().
+mark_as_being_drained() ->
+ rabbit_log:debug("Marking the node as undergoing maintenance"),
+ set_maintenance_status_status(?DRAINING_STATUS).
+
+-spec unmark_as_being_drained() -> boolean().
+unmark_as_being_drained() ->
+ rabbit_log:debug("Unmarking the node as undergoing maintenance"),
+ set_maintenance_status_status(?DEFAULT_STATUS).
+
+set_maintenance_status_status(Status) ->
+ Res = mnesia:transaction(fun () ->
+ case mnesia:wread({?TABLE, node()}) of
+ [] ->
+ Row = #node_maintenance_state{
+ node = node(),
+ status = Status
+ },
+ mnesia:write(?TABLE, Row, write);
+ [Row0] ->
+ Row = Row0#node_maintenance_state{
+ node = node(),
+ status = Status
+ },
+ mnesia:write(?TABLE, Row, write)
+ end
+ end),
+ case Res of
+ {atomic, ok} -> true;
+ _ -> false
+ end.
+
+
+-spec is_being_drained_local_read(node()) -> boolean().
+is_being_drained_local_read(Node) ->
+ Status = status_local_read(Node),
+ Status =:= ?DRAINING_STATUS.
+
+-spec is_being_drained_consistent_read(node()) -> boolean().
+is_being_drained_consistent_read(Node) ->
+ Status = status_consistent_read(Node),
+ Status =:= ?DRAINING_STATUS.
+
+-spec status_local_read(node()) -> maintenance_status().
+status_local_read(Node) ->
+ case catch mnesia:dirty_read(?TABLE, Node) of
+ [] -> ?DEFAULT_STATUS;
+ [#node_maintenance_state{node = Node, status = Status}] ->
+ Status;
+ _ -> ?DEFAULT_STATUS
+ end.
+
+-spec status_consistent_read(node()) -> maintenance_status().
+status_consistent_read(Node) ->
+ case mnesia:transaction(fun() -> mnesia:read(?TABLE, Node) end) of
+ {atomic, []} -> ?DEFAULT_STATUS;
+ {atomic, [#node_maintenance_state{node = Node, status = Status}]} ->
+ Status;
+ {atomic, _} -> ?DEFAULT_STATUS;
+ {aborted, _Reason} -> ?DEFAULT_STATUS
+ end.
+
+-spec filter_out_drained_nodes_local_read([node()]) -> [node()].
+filter_out_drained_nodes_local_read(Nodes) ->
+ lists:filter(fun(N) -> not is_being_drained_local_read(N) end, Nodes).
+
+-spec filter_out_drained_nodes_consistent_read([node()]) -> [node()].
+filter_out_drained_nodes_consistent_read(Nodes) ->
+ lists:filter(fun(N) -> not is_being_drained_consistent_read(N) end, Nodes).
+
+-spec suspend_all_client_listeners() -> rabbit_types:ok_or_error(any()).
+%% Pauses all listeners on the current node except for
+%% Erlang distribution (clustering and CLI tools).
+%% A suspended listener will not accept any new client connections
+%% but previously established connections won't be interrupted.
+suspend_all_client_listeners() ->
+ Listeners = rabbit_networking:node_client_listeners(node()),
+ rabbit_log:info("Asked to suspend ~b client connection listeners. "
+ "No new client connections will be accepted until these listeners are resumed!", [length(Listeners)]),
+ Results = lists:foldl(local_listener_fold_fun(fun ranch:suspend_listener/1), [], Listeners),
+ lists:foldl(fun ok_or_first_error/2, ok, Results).
+
+-spec resume_all_client_listeners() -> rabbit_types:ok_or_error(any()).
+%% Resumes all listeners on the current node except for
+%% Erlang distribution (clustering and CLI tools).
+%% A resumed listener will accept new client connections.
+resume_all_client_listeners() ->
+ Listeners = rabbit_networking:node_client_listeners(node()),
+ rabbit_log:info("Asked to resume ~b client connection listeners. "
+ "New client connections will be accepted from now on", [length(Listeners)]),
+ Results = lists:foldl(local_listener_fold_fun(fun ranch:resume_listener/1), [], Listeners),
+ lists:foldl(fun ok_or_first_error/2, ok, Results).
+
+-spec close_all_client_connections() -> {'ok', non_neg_integer()}.
+close_all_client_connections() ->
+ Pids = rabbit_networking:local_connections(),
+ rabbit_networking:close_connections(Pids, "Node was put into maintenance mode"),
+ {ok, length(Pids)}.
+
+-spec transfer_leadership_of_quorum_queues([node()]) -> ok.
+transfer_leadership_of_quorum_queues([]) ->
+ rabbit_log:warning("Skipping leadership transfer of quorum queues: no candidate "
+ "(online, not under maintenance) nodes to transfer to!");
+transfer_leadership_of_quorum_queues(_TransferCandidates) ->
+ %% we only transfer leadership for QQs that have local leaders
+ Queues = rabbit_amqqueue:list_local_leaders(),
+ rabbit_log:info("Will transfer leadership of ~b quorum queues with current leader on this node",
+ [length(Queues)]),
+ [begin
+ Name = amqqueue:get_name(Q),
+ rabbit_log:debug("Will trigger a leader election for local quorum queue ~s",
+ [rabbit_misc:rs(Name)]),
+ %% we trigger an election and exclude this node from the list of candidates
+ %% by simply shutting down its local QQ replica (Ra server)
+ RaLeader = amqqueue:get_pid(Q),
+ rabbit_log:debug("Will stop Ra server ~p", [RaLeader]),
+ case ra:stop_server(RaLeader) of
+ ok ->
+ rabbit_log:debug("Successfully stopped Ra server ~p", [RaLeader]);
+ {error, nodedown} ->
+ rabbit_log:error("Failed to stop Ra server ~p: target node was reported as down")
+ end
+ end || Q <- Queues],
+ rabbit_log:info("Leadership transfer for quorum queues hosted on this node has been initiated").
+
+-spec transfer_leadership_of_classic_mirrored_queues([node()]) -> ok.
+transfer_leadership_of_classic_mirrored_queues([]) ->
+ rabbit_log:warning("Skipping leadership transfer of classic mirrored queues: no candidate "
+ "(online, not under maintenance) nodes to transfer to!");
+transfer_leadership_of_classic_mirrored_queues(TransferCandidates) ->
+ Queues = rabbit_amqqueue:list_local_mirrored_classic_queues(),
+ ReadableCandidates = readable_candidate_list(TransferCandidates),
+ rabbit_log:info("Will transfer leadership of ~b classic mirrored queues hosted on this node to these peer nodes: ~s",
+ [length(Queues), ReadableCandidates]),
+
+ [begin
+ Name = amqqueue:get_name(Q),
+ case random_primary_replica_transfer_candidate_node(TransferCandidates) of
+ {ok, Pick} ->
+ rabbit_log:debug("Will transfer leadership of local queue ~s to node ~s",
+ [rabbit_misc:rs(Name), Pick]),
+ case rabbit_mirror_queue_misc:transfer_leadership(Q, Pick) of
+ {migrated, _} ->
+ rabbit_log:debug("Successfully transferred leadership of queue ~s to node ~s",
+ [rabbit_misc:rs(Name), Pick]);
+ Other ->
+ rabbit_log:warning("Could not transfer leadership of queue ~s to node ~s: ~p",
+ [rabbit_misc:rs(Name), Pick, Other])
+ end;
+ undefined ->
+ rabbit_log:warning("Could not transfer leadership of queue ~s: no suitable candidates?",
+ [rabbit_misc:rs(Name)])
+ end
+ end || Q <- Queues],
+ rabbit_log:info("Leadership transfer for local classic mirrored queues is complete").
+
+-spec stop_local_quorum_queue_followers() -> ok.
+stop_local_quorum_queue_followers() ->
+ Queues = rabbit_amqqueue:list_local_followers(),
+ rabbit_log:info("Will stop local follower replicas of ~b quorum queues on this node",
+ [length(Queues)]),
+ [begin
+ Name = amqqueue:get_name(Q),
+ rabbit_log:debug("Will stop a local follower replica of quorum queue ~s",
+ [rabbit_misc:rs(Name)]),
+ %% shut down Ra nodes so that they are not considered for leader election
+ {RegisteredName, _LeaderNode} = amqqueue:get_pid(Q),
+ RaNode = {RegisteredName, node()},
+ rabbit_log:debug("Will stop Ra server ~p", [RaNode]),
+ case ra:stop_server(RaNode) of
+ ok ->
+ rabbit_log:debug("Successfully stopped Ra server ~p", [RaNode]);
+ {error, nodedown} ->
+ rabbit_log:error("Failed to stop Ra server ~p: target node was reported as down")
+ end
+ end || Q <- Queues],
+ rabbit_log:info("Stopped all local replicas of quorum queues hosted on this node").
+
+-spec primary_replica_transfer_candidate_nodes() -> [node()].
+primary_replica_transfer_candidate_nodes() ->
+ filter_out_drained_nodes_consistent_read(rabbit_nodes:all_running() -- [node()]).
+
+-spec random_primary_replica_transfer_candidate_node([node()]) -> {ok, node()} | undefined.
+random_primary_replica_transfer_candidate_node([]) ->
+ undefined;
+random_primary_replica_transfer_candidate_node(Candidates) ->
+ Nth = erlang:phash2(erlang:monotonic_time(), length(Candidates)),
+ Candidate = lists:nth(Nth + 1, Candidates),
+ {ok, Candidate}.
+
+revive_local_quorum_queue_replicas() ->
+ Queues = rabbit_amqqueue:list_local_followers(),
+ [begin
+ Name = amqqueue:get_name(Q),
+ rabbit_log:debug("Will trigger a leader election for local quorum queue ~s",
+ [rabbit_misc:rs(Name)]),
+ %% start local QQ replica (Ra server) of this queue
+ {Prefix, _Node} = amqqueue:get_pid(Q),
+ RaServer = {Prefix, node()},
+ rabbit_log:debug("Will start Ra server ~p", [RaServer]),
+ case ra:restart_server(RaServer) of
+ ok ->
+ rabbit_log:debug("Successfully restarted Ra server ~p", [RaServer]);
+ {error, {already_started, _Pid}} ->
+ rabbit_log:debug("Ra server ~p is already running", [RaServer]);
+ {error, nodedown} ->
+ rabbit_log:error("Failed to restart Ra server ~p: target node was reported as down")
+ end
+ end || Q <- Queues],
+ rabbit_log:info("Restart of local quorum queue replicas is complete").
+
+%%
+%% Implementation
+%%
+
+local_listener_fold_fun(Fun) ->
+ fun(#listener{node = Node, ip_address = Addr, port = Port}, Acc) when Node =:= node() ->
+ RanchRef = rabbit_networking:ranch_ref(Addr, Port),
+ [Fun(RanchRef) | Acc];
+ (_, Acc) ->
+ Acc
+ end.
+
+ok_or_first_error(ok, Acc) ->
+ Acc;
+ok_or_first_error({error, _} = Err, _Acc) ->
+ Err.
+
+readable_candidate_list(Nodes) ->
+ string:join(lists:map(fun rabbit_data_coercion:to_list/1, Nodes), ", ").
diff --git a/deps/rabbit/src/rabbit_memory_monitor.erl b/deps/rabbit/src/rabbit_memory_monitor.erl
new file mode 100644
index 0000000000..5934a97cff
--- /dev/null
+++ b/deps/rabbit/src/rabbit_memory_monitor.erl
@@ -0,0 +1,259 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+
+%% This module handles the node-wide memory statistics.
+%% It receives RAM duration reports from all queues, computes the
+%% desired queue duration (in seconds), and sends this target back
+%% to the queues.
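+%%
+%% A queue process is expected to interact with it roughly like this
+%% (a sketch; the callback MFA shown is a hypothetical example):
+%%
+%%     ok = rabbit_memory_monitor:register(
+%%            self(), {my_queue_mod, set_ram_duration_target, [self()]}),
+%%     Target = rabbit_memory_monitor:report_ram_duration(self(), 2.5),
+%%     ...
+%%     ok = rabbit_memory_monitor:deregister(self()).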
+
+-module(rabbit_memory_monitor).
+
+-behaviour(gen_server2).
+
+-export([start_link/0, register/2, deregister/1,
+ report_ram_duration/2, stop/0, conserve_resources/3, memory_use/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(process, {pid, reported, sent, callback, monitor}).
+
+-record(state, {timer, %% 'internal_update' timer
+ queue_durations, %% ets #process
+ queue_duration_sum, %% sum of all queue_durations
+ queue_duration_count, %% number of elements in sum
+ desired_duration, %% the desired queue duration
+ disk_alarm %% disable paging, disk alarm has fired
+ }).
+
+-define(SERVER, ?MODULE).
+-define(TABLE_NAME, ?MODULE).
+
+%% If all queues are pushed to disk (duration 0), then the sum of
+%% their reported lengths will be 0. If memory then becomes available,
+%% unless we manually intervene, the sum will remain 0, and the queues
+%% will never get a non-zero duration. Thus when the mem use is <
+%% SUM_INC_THRESHOLD, increase the sum artificially by SUM_INC_AMOUNT.
+-define(SUM_INC_THRESHOLD, 0.95).
+-define(SUM_INC_AMOUNT, 1.0).
+
+-define(EPSILON, 0.000001). %% less than this and we clamp to 0
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server2:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+-spec register(pid(), {atom(),atom(),[any()]}) -> 'ok'.
+
+register(Pid, MFA = {_M, _F, _A}) ->
+ gen_server2:call(?SERVER, {register, Pid, MFA}, infinity).
+
+-spec deregister(pid()) -> 'ok'.
+
+deregister(Pid) ->
+ gen_server2:cast(?SERVER, {deregister, Pid}).
+
+-spec report_ram_duration
+ (pid(), float() | 'infinity') -> number() | 'infinity'.
+
+report_ram_duration(Pid, QueueDuration) ->
+ gen_server2:call(?SERVER,
+ {report_ram_duration, Pid, QueueDuration}, infinity).
+
+-spec stop() -> 'ok'.
+
+stop() ->
+ gen_server2:cast(?SERVER, stop).
+
+%% Paging should be enabled/disabled only in response to disk resource alarms
+%% for the current node.
+conserve_resources(Pid, disk, {_, Conserve, Node}) when node(Pid) =:= Node ->
+ gen_server2:cast(Pid, {disk_alarm, Conserve});
+conserve_resources(_Pid, _Source, _Conserve) ->
+ ok.
+
+memory_use(Type) ->
+ vm_memory_monitor:get_memory_use(Type).
+
+%%----------------------------------------------------------------------------
+%% Gen_server callbacks
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, Interval} = application:get_env(rabbit, memory_monitor_interval),
+ {ok, TRef} = timer:send_interval(Interval, update),
+
+ Ets = ets:new(?TABLE_NAME, [set, private, {keypos, #process.pid}]),
+ Alarms = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
+ {ok, internal_update(
+ #state { timer = TRef,
+ queue_durations = Ets,
+ queue_duration_sum = 0.0,
+ queue_duration_count = 0,
+ desired_duration = infinity,
+ disk_alarm = lists:member(disk, Alarms)})}.
+
+handle_call({report_ram_duration, Pid, QueueDuration}, From,
+ State = #state { queue_duration_sum = Sum,
+ queue_duration_count = Count,
+ queue_durations = Durations,
+ desired_duration = SendDuration }) ->
+
+ [Proc = #process { reported = PrevQueueDuration }] =
+ ets:lookup(Durations, Pid),
+
+ gen_server2:reply(From, SendDuration),
+
+ {Sum1, Count1} =
+ case {PrevQueueDuration, QueueDuration} of
+ {infinity, infinity} -> {Sum, Count};
+ {infinity, _} -> {Sum + QueueDuration, Count + 1};
+ {_, infinity} -> {Sum - PrevQueueDuration, Count - 1};
+ {_, _} -> {Sum - PrevQueueDuration + QueueDuration,
+ Count}
+ end,
+ true = ets:insert(Durations, Proc #process { reported = QueueDuration,
+ sent = SendDuration }),
+ {noreply, State #state { queue_duration_sum = zero_clamp(Sum1),
+ queue_duration_count = Count1 }};
+
+handle_call({register, Pid, MFA}, _From,
+ State = #state { queue_durations = Durations }) ->
+ MRef = erlang:monitor(process, Pid),
+ true = ets:insert(Durations, #process { pid = Pid, reported = infinity,
+ sent = infinity, callback = MFA,
+ monitor = MRef }),
+ {reply, ok, State};
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast({disk_alarm, Alarm}, State = #state{disk_alarm = Alarm}) ->
+ {noreply, State};
+
+handle_cast({disk_alarm, Alarm}, State) ->
+ {noreply, internal_update(State#state{disk_alarm = Alarm})};
+
+handle_cast({deregister, Pid}, State) ->
+ {noreply, internal_deregister(Pid, true, State)};
+
+handle_cast(stop, State) ->
+ {stop, normal, State};
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(update, State) ->
+ {noreply, internal_update(State)};
+
+handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) ->
+ {noreply, internal_deregister(Pid, false, State)};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, #state { timer = TRef }) ->
+ timer:cancel(TRef),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+
+zero_clamp(Sum) when Sum < ?EPSILON -> 0.0;
+zero_clamp(Sum) -> Sum.
+
+internal_deregister(Pid, Demonitor,
+ State = #state { queue_duration_sum = Sum,
+ queue_duration_count = Count,
+ queue_durations = Durations }) ->
+ case ets:lookup(Durations, Pid) of
+ [] -> State;
+ [#process { reported = PrevQueueDuration, monitor = MRef }] ->
+ true = case Demonitor of
+ true -> erlang:demonitor(MRef);
+ false -> true
+ end,
+ {Sum1, Count1} =
+ case PrevQueueDuration of
+ infinity -> {Sum, Count};
+ _ -> {zero_clamp(Sum - PrevQueueDuration),
+ Count - 1}
+ end,
+ true = ets:delete(Durations, Pid),
+ State #state { queue_duration_sum = Sum1,
+ queue_duration_count = Count1 }
+ end.
+
+internal_update(State = #state{queue_durations = Durations,
+ desired_duration = DesiredDurationAvg,
+ disk_alarm = DiskAlarm}) ->
+ DesiredDurationAvg1 = desired_duration_average(State),
+ ShouldInform = should_inform_predicate(DiskAlarm),
+ case ShouldInform(DesiredDurationAvg, DesiredDurationAvg1) of
+ true -> inform_queues(ShouldInform, DesiredDurationAvg1, Durations);
+ false -> ok
+ end,
+ State#state{desired_duration = DesiredDurationAvg1}.
+
+desired_duration_average(#state{disk_alarm = true}) ->
+ infinity;
+desired_duration_average(#state{disk_alarm = false,
+ queue_duration_sum = Sum,
+ queue_duration_count = Count}) ->
+ {ok, LimitThreshold} =
+ application:get_env(rabbit, vm_memory_high_watermark_paging_ratio),
+ MemoryRatio = memory_use(ratio),
+ if MemoryRatio =:= infinity ->
+ 0.0;
+ MemoryRatio < LimitThreshold orelse Count == 0 ->
+ infinity;
+ MemoryRatio < ?SUM_INC_THRESHOLD ->
+ ((Sum + ?SUM_INC_AMOUNT) / Count) / MemoryRatio;
+ true ->
+ (Sum / Count) / MemoryRatio
+ end.
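+
+%% Illustrative numbers (not from a real run): with the default paging ratio
+%% of 0.5, a memory use ratio of 0.8, Sum = 10.0 and Count = 2, the
+%% "MemoryRatio < ?SUM_INC_THRESHOLD" clause applies and the desired
+%% duration is ((10.0 + ?SUM_INC_AMOUNT) / 2) / 0.8 = 6.875 seconds.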
+
+inform_queues(ShouldInform, DesiredDurationAvg, Durations) ->
+ true =
+ ets:foldl(
+ fun (Proc = #process{reported = QueueDuration,
+ sent = PrevSendDuration,
+ callback = {M, F, A}}, true) ->
+ case ShouldInform(PrevSendDuration, DesiredDurationAvg)
+ andalso ShouldInform(QueueDuration, DesiredDurationAvg) of
+ true -> ok = erlang:apply(
+ M, F, A ++ [DesiredDurationAvg]),
+ ets:insert(
+ Durations,
+ Proc#process{sent = DesiredDurationAvg});
+ false -> true
+ end
+ end, true, Durations).
+
+%% In normal use, we only inform queues immediately if the desired
+%% duration has decreased, since we want to ensure timely paging.
+should_inform_predicate(false) -> fun greater_than/2;
+%% When the disk alarm has gone off though, we want to inform queues
+%% immediately if the desired duration has *increased* - we want to
+%% ensure that paging stops promptly.
+should_inform_predicate(true) -> fun (D1, D2) -> greater_than(D2, D1) end.
+
+greater_than(infinity, infinity) -> false;
+greater_than(infinity, _D2) -> true;
+greater_than(_D1, infinity) -> false;
+greater_than(D1, D2) -> D1 > D2.
diff --git a/deps/rabbit/src/rabbit_metrics.erl b/deps/rabbit/src/rabbit_metrics.erl
new file mode 100644
index 0000000000..10418e3884
--- /dev/null
+++ b/deps/rabbit/src/rabbit_metrics.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_metrics).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+%% Starts the raw metrics storage and owns the ETS tables.
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+init([]) ->
+ rabbit_core_metrics:init(),
+ {ok, none}.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_coordinator.erl b/deps/rabbit/src/rabbit_mirror_queue_coordinator.erl
new file mode 100644
index 0000000000..91a7c3ddc8
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_coordinator.erl
@@ -0,0 +1,460 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_coordinator).
+
+-export([start_link/4, get_gm/1, ensure_monitoring/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, handle_pre_hibernate/1]).
+
+-export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
+
+-behaviour(gen_server2).
+-behaviour(gm).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+-include("gm_specs.hrl").
+
+-record(state, { q,
+ gm,
+ monitors,
+ death_fun,
+ depth_fun
+ }).
+
+%%----------------------------------------------------------------------------
+%%
+%% Mirror Queues
+%%
+%% A queue with mirrors consists of the following:
+%%
+%% #amqqueue{ pid, slave_pids }
+%% | |
+%% +----------+ +-------+--------------+-----------...etc...
+%% | | |
+%% V V V
+%% amqqueue_process---+ mirror-----+ mirror-----+ ...etc...
+%% | BQ = master----+ | | BQ = vq | | BQ = vq |
+%% | | BQ = vq | | +-+-------+ +-+-------+
+%% | +-+-------+ | | |
+%% +-++-----|---------+ | | (some details elided)
+%% || | | |
+%% || coordinator-+ | |
+%% || +-+---------+ | |
+%% || | | |
+%% || gm-+ -- -- -- -- gm-+- -- -- -- gm-+- -- --...etc...
+%% || +--+ +--+ +--+
+%% ||
+%% consumers
+%%
+%% The master is merely an implementation of bq, and thus is invoked
+%% through the normal bq interface by the amqqueue_process. The mirrors
+%% meanwhile are processes in their own right (as is the
+%% coordinator). The coordinator and all mirrors belong to the same gm
+%% group. Every member of a gm group receives messages sent to the gm
+%% group. Because the master is the bq of amqqueue_process, it doesn't
+%% have sole control over its mailbox, and as a result, the master
+%% itself cannot be passed messages directly (well, it could be, via
+%% the amqqueue:run_backing_queue callback but that would induce
+%% additional unnecessary loading on the master queue process), yet it
+%% needs to react to gm events, such as the death of mirrors. Thus the
+%% master creates the coordinator, and it is the coordinator that is
+%% the gm callback module and event handler for the master.
+%%
+%% Consumers are only attached to the master. Thus the master is
+%% responsible for informing all mirrors when messages are fetched from
+%% the bq, when they're acked, and when they're requeued.
+%%
+%% The basic goal is to ensure that all mirrors perform actions on
+%% their bqs in the same order as the master. Thus the master
+%% intercepts all events going to its bq, and suitably broadcasts
+%% these events on the gm. The mirrors thus receive two streams of
+%% events: one stream is via the gm, and one stream is from channels
+%% directly. Whilst the stream via gm is guaranteed to be consistently
+%% seen by all mirrors, the same is not true of the stream via
+%% channels. For example, in the event of an unexpected death of a
+%% channel during a publish, only some of the mirrors may receive that
+%% publish. As a result of this problem, the messages broadcast over
+%% the gm contain published content, and thus mirrors can operate
+%% successfully on messages that they only receive via the gm.
+%%
+%% The key purpose of also sending messages directly from the channels
+%% to the mirrors is that without this, in the event of the death of
+%% the master, messages could be lost until a suitable mirror is
+%% promoted. However, that is not the only reason. A mirror cannot send
+%% confirms for a message until it has seen it from the
+%% channel. Otherwise, it might send a confirm to a channel for a
+%% message that it might *never* receive from that channel. This can
+%% happen because new mirrors join the gm ring (and thus receive
+%% messages from the master) before inserting themselves in the
+%% queue's mnesia record (which is what channels look at for routing).
+%% As it turns out, channels will simply ignore such bogus confirms,
+%% but relying on that would introduce a dangerously tight coupling.
+%%
+%% Hence the mirrors have to wait until they've seen both the publish
+%% via gm, and the publish via the channel before they issue the
+%% confirm. Either form of publish can arrive first, and a mirror can
+%% be upgraded to the master at any point during this
+%% process. Confirms continue to be issued correctly, however.
+%%
+%% Because the mirror is a full process, it impersonates parts of the
+%% amqqueue API. However, it does not need to implement all parts: for
+%% example, no ack or consumer-related message can arrive directly at
+%% a mirror from a channel: it is only publishes that pass both
+%% directly to the mirrors and go via gm.
+%%
+%% Mirrors can be added dynamically. When this occurs, there is no
+%% attempt made to sync the current contents of the master with the
+%% new mirror, thus the mirror will start empty, regardless of the state
+%% of the master. Thus the mirror needs to be able to detect and ignore
+%% operations which are for messages it has not received: because of
+%% the strict FIFO nature of queues in general, this is
+%% straightforward - all new publishes that the new mirror receives via
+%% gm should be processed as normal, but fetches which are for
+%% messages the mirror has never seen should be ignored. Similarly,
+%% acks for messages the mirror never fetched should be
+%% ignored. Similarly, we don't republish rejected messages that we
+%% haven't seen. Eventually, as the master is consumed from, the
+%% messages at the head of the queue which were there before the mirror
+%% joined will disappear, and the mirror will become fully synced with
+%% the state of the master.
+%%
+%% The detection of the sync-status is based on the depth of the BQs,
+%% where the depth is defined as the sum of the length of the BQ (as
+%% per BQ:len) and the messages pending an acknowledgement. When the
+%% depth of the mirror is equal to the master's, then the mirror is
+%% synchronised. We only store the difference between the two for
+%% simplicity. Comparing the length is not enough since we need to
+%% take into account rejected messages which will make it back into
+%% the master queue but can't go back in the mirror, since we don't
+%% want "holes" in the mirror queue. Note that the depth, and the
+%% length likewise, must always be shorter on the mirror - we assert
+%% that in various places. In case mirrors are joined to an empty queue
+%% which only goes on to receive publishes, they start by asking the
+%% master to broadcast its depth. This is enough for mirrors to always
+%% be able to work out when their head does not differ from the master
+%% (and is much simpler and cheaper than getting the master to hang on
+%% to the guid of the msg at the head of its queue). When a mirror is
+%% promoted to a master, it unilaterally broadcasts its depth, in
+%% order to solve the problem of depth requests from new mirrors being
+%% unanswered by a dead master.
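+%%
+%% For example (illustrative numbers), a mirror whose BQ length is 3 and
+%% which has 2 messages pending acknowledgement has a depth of 5; it is
+%% considered synchronised only once the master's depth is also 5.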
+%%
+%% Obviously, due to the async nature of communication across gm, the
+%% mirrors can fall behind. This does not matter from a sync pov: if
+%% they fall behind and the master dies then a) no publishes are lost
+%% because all publishes go to all mirrors anyway; b) the worst that
+%% happens is that acks get lost and so messages come back to
+%% life. This is no worse than normal given you never get confirmation
+%% that an ack has been received (not quite true with QoS-prefetch,
+%% but close enough for jazz).
+%%
+%% Because acktags are issued by the bq independently, and because
+%% there is no requirement for the master and all mirrors to use the
+%% same bq, all references to msgs going over gm are by msg_id. Thus
+%% upon acking, the master must convert the acktags back to msg_ids
+%% (which happens to be what bq:ack returns), then send the msg_ids
+%% over gm; the mirrors must convert the msg_ids to acktags (a mapping
+%% the mirrors themselves must maintain).
+%%
+%% When the master dies, a mirror gets promoted. This will be the
+%% eldest mirror, and thus the hope is that that mirror is most likely
+%% to be sync'd with the master. The design of gm is that the
+%% notification of the death of the master will only appear once all
+%% messages in-flight from the master have been fully delivered to all
+%% members of the gm group. Thus at this point, the mirror that gets
+%% promoted cannot broadcast different events in a different order
+%% than the master for the same msgs: there is no possibility for the
+%% same msg to be processed by the old master and the new master - if
+%% it was processed by the old master then it will have been processed
+%% by the mirror before the mirror was promoted, and vice versa.
+%%
+%% Upon promotion, all msgs pending acks are requeued as normal; the
+%% mirror constructs state suitable for use in the master module, and
+%% then dynamically changes into an amqqueue_process with the master
+%% as the bq, and the mirror's bq as the master's bq. Thus the very
+%% same process that was the mirror is now a full amqqueue_process.
+%%
+%% It is important that we avoid memory leaks due to the death of
+%% senders (i.e. channels) and partial publications. A sender
+%% publishing a message may fail mid way through the publish and thus
+%% only some of the mirrors will receive the message. We need the
+%% mirrors to be able to detect this and tidy up as necessary to avoid
+%% leaks. If we just had the master monitoring all senders then we
+%% would have the possibility that a sender appears and only sends the
+%% message to a few of the mirrors before dying. Those mirrors would
+%% then hold on to the message, assuming they'll receive some
+%% instruction eventually from the master. Thus we have both mirrors
+%% and the master monitor all senders they become aware of. But there
+%% is a race: if the mirror receives a DOWN of a sender, how does it
+%% know whether or not the master is going to send it instructions
+%% regarding those messages?
+%%
+%% Whilst the master monitors senders, it can't access its mailbox
+%% directly, so it delegates monitoring to the coordinator. When the
+%% coordinator receives a DOWN message from a sender, it informs the
+%% master via a callback. This allows the master to do any tidying
+%% necessary, but more importantly allows the master to broadcast a
+%% sender_death message to all the mirrors, saying the sender has
+%% died. Once the mirrors receive the sender_death message, they know
+%% that they're not going to receive any more instructions from the gm
+%% regarding that sender. However, it is possible that the coordinator
+%% receives the DOWN and communicates that to the master before the
+%% master has finished receiving and processing publishes from the
+%% sender. This turns out not to be a problem: the sender has actually
+%% died, and so will not need to receive confirms or other feedback,
+%% and should further messages be "received" from the sender, the
+%% master will ask the coordinator to set up a new monitor, and
+%% will continue to process the messages normally. Mirrors may thus
+%% receive publishes via gm from previously declared "dead" senders,
+%% but again, this is fine: should the mirror have just thrown out the
+%% message it had received directly from the sender (due to receiving
+%% a sender_death message via gm), it will be able to cope with the
+%% publication purely from the master via gm.
+%%
+%% When a mirror receives a DOWN message for a sender, if it has not
+%% received the sender_death message from the master via gm already,
+%% then it will wait 20 seconds before broadcasting a request for
+%% confirmation from the master that the sender really has died.
+%% Should a sender have only sent a publish to mirrors, this allows
+%% mirrors to inform the master of the previous existence of the
+%% sender. The master will thus monitor the sender, receive the DOWN,
+%% and subsequently broadcast the sender_death message, allowing the
+%% mirrors to tidy up. This process can repeat for the same sender:
+%% consider one mirror receives the publication, then the DOWN, then
+%% asks for confirmation of death, then the master broadcasts the
+%% sender_death message. Only then does another mirror receive the
+%% publication and thus set up its monitoring. Eventually that mirror
+%% too will receive the DOWN, ask for confirmation and the master will
+%% monitor the sender again, receive another DOWN, and send out
+%% another sender_death message. Given the 20 second delay before
+%% requesting death confirmation, this is highly unlikely, but it is a
+%% possibility.
+%%
+%% When the 20 second timer expires, the mirror first checks to see
+%% whether it still needs confirmation of the death before requesting
+%% it. This prevents unnecessary traffic on gm as it allows one
+%% broadcast of the sender_death message to satisfy many mirrors.
+%%
+%% If we consider the promotion of a mirror at this point, we have two
+%% possibilities: that of the mirror that has received the DOWN and is
+%% thus waiting for confirmation from the master that the sender
+%% really is down; and that of the mirror that has not received the
+%% DOWN. In the first case, in the act of promotion to master, the new
+%% master will monitor again the dead sender, and after it has
+%% finished promoting itself, it should find another DOWN waiting,
+%% which it will then broadcast. This will allow mirrors to tidy up as
+%% normal. In the second case, we have the possibility that
+%% confirmation-of-sender-death request has been broadcast, but that
+%% it was broadcast before the master failed, and that the mirror being
+%% promoted does not know anything about that sender, and so will not
+%% monitor it on promotion. Thus a mirror that broadcasts such a
+%% request, at the point of broadcasting it, recurses, setting another
+%% 20 second timer. As before, on expiry of the timer, the mirror
+%% checks to see whether it still has not received a sender_death
+%% message for the dead sender, and if not, broadcasts a death
+%% confirmation request. Thus this ensures that even when a master
+%% dies and the new mirror has no knowledge of the dead sender, it will
+%% eventually receive a death confirmation request, shall monitor the
+%% dead sender, receive the DOWN and broadcast the sender_death
+%% message.
+%%
+%% The preceding commentary deals with the possibility of mirrors
+%% receiving publications from senders which the master does not, and
+%% the need to prevent memory leaks in such scenarios. The inverse is
+%% also possible: a partial publication may cause only the master to
+%% receive a publication. It will then publish the message via gm. The
+%% mirrors will receive it via gm, will publish it to their BQ and will
+%% set up monitoring on the sender. They will then receive the DOWN
+%% message and the master will eventually publish the corresponding
+%% sender_death message. The mirror will then be able to tidy up its
+%% state as normal.
+%%
+%% Recovery of mirrored queues is straightforward: as nodes die, the
+%% remaining nodes record this, and eventually a situation is reached
+%% in which only one node is alive, which is the master. This is the
+%% only node which, upon recovery, will resurrect a mirrored queue:
+%% nodes which die and then rejoin as a mirror will start off empty as
+%% if they have no mirrored content at all. This is not surprising: to
+%% achieve anything more sophisticated would require the master and
+%% recovering mirror to be able to check to see whether they agree on
+%% the last seen state of the queue: checking depth alone is not
+%% sufficient in this case.
+%%
+%% For more documentation see the comments in bug 23554.
+%%
+%%----------------------------------------------------------------------------
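+
+%% A rough sketch, based on the description above, of how the master side
+%% is expected to use this module (variable names are illustrative):
+%%
+%%     {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(
+%%                    Q, undefined, DeathFun, DepthFun),
+%%     GM = rabbit_mirror_queue_coordinator:get_gm(CPid),
+%%     %% the master broadcasts BQ events to the mirrors over GM, and asks
+%%     %% the coordinator to monitor any message senders it becomes aware of
+%%     ok = rabbit_mirror_queue_coordinator:ensure_monitoring(CPid, [SenderPid]).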
+
+-spec start_link
+ (amqqueue:amqqueue(), pid() | 'undefined',
+ rabbit_mirror_queue_master:death_fun(),
+ rabbit_mirror_queue_master:depth_fun()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Queue, GM, DeathFun, DepthFun) ->
+ gen_server2:start_link(?MODULE, [Queue, GM, DeathFun, DepthFun], []).
+
+-spec get_gm(pid()) -> pid().
+
+get_gm(CPid) ->
+ gen_server2:call(CPid, get_gm, infinity).
+
+-spec ensure_monitoring(pid(), [pid()]) -> 'ok'.
+
+ensure_monitoring(CPid, Pids) ->
+ gen_server2:cast(CPid, {ensure_monitoring, Pids}).
+
+%% ---------------------------------------------------------------------------
+%% gen_server
+%% ---------------------------------------------------------------------------
+
+init([Q, GM, DeathFun, DepthFun]) when ?is_amqqueue(Q) ->
+ QueueName = amqqueue:get_name(Q),
+ ?store_proc_name(QueueName),
+ GM1 = case GM of
+ undefined ->
+ {ok, GM2} = gm:start_link(
+ QueueName, ?MODULE, [self()],
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ receive {joined, GM2, _Members} ->
+ ok
+ end,
+ GM2;
+ _ ->
+ true = link(GM),
+ GM
+ end,
+ {ok, #state { q = Q,
+ gm = GM1,
+ monitors = pmon:new(),
+ death_fun = DeathFun,
+ depth_fun = DepthFun },
+ hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+handle_call(get_gm, _From, State = #state { gm = GM }) ->
+ reply(GM, State).
+
+handle_cast({gm_deaths, DeadGMPids}, State = #state{q = Q}) when ?amqqueue_pid_runs_on_local_node(Q) ->
+ QueueName = amqqueue:get_name(Q),
+ MPid = amqqueue:get_pid(Q),
+ case rabbit_mirror_queue_misc:remove_from_queue(
+ QueueName, MPid, DeadGMPids) of
+ {ok, MPid, DeadPids, ExtraNodes} ->
+ rabbit_mirror_queue_misc:report_deaths(MPid, true, QueueName,
+ DeadPids),
+ rabbit_mirror_queue_misc:add_mirrors(QueueName, ExtraNodes, async),
+ noreply(State);
+ {ok, _MPid0, DeadPids, _ExtraNodes} ->
+ %% see rabbitmq-server#914;
+ %% Different mirror is now master, stop current coordinator normally.
+ %% Initiating queue is now mirror and the least we could do is report
+ %% deaths which we 'think' we saw.
+ %% NOTE: Reported deaths here, could be inconsistent.
+ rabbit_mirror_queue_misc:report_deaths(MPid, false, QueueName,
+ DeadPids),
+ {stop, shutdown, State};
+ {error, not_found} ->
+ {stop, normal, State};
+ {error, {not_synced, _}} ->
+ rabbit_log:error("Mirror queue ~p in unexpected state."
+ " Promoted to master but already a master.",
+ [QueueName]),
+ error(unexpected_mirrored_state)
+ end;
+
+handle_cast(request_depth, State = #state{depth_fun = DepthFun, q = QArg}) when ?is_amqqueue(QArg) ->
+ QName = amqqueue:get_name(QArg),
+ MPid = amqqueue:get_pid(QArg),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, QFound} when ?amqqueue_pid_equals(QFound, MPid) ->
+ ok = DepthFun(),
+ noreply(State);
+ _ ->
+ {stop, shutdown, State}
+ end;
+
+handle_cast({ensure_monitoring, Pids}, State = #state { monitors = Mons }) ->
+ noreply(State #state { monitors = pmon:monitor_all(Pids, Mons) });
+
+handle_cast({delete_and_terminate, {shutdown, ring_shutdown}}, State) ->
+ {stop, normal, State};
+handle_cast({delete_and_terminate, Reason}, State) ->
+ {stop, Reason, State}.
+
+handle_info({'DOWN', _MonitorRef, process, Pid, _Reason},
+ State = #state { monitors = Mons,
+ death_fun = DeathFun }) ->
+ noreply(case pmon:is_monitored(Pid, Mons) of
+ false -> State;
+ true -> ok = DeathFun(Pid),
+ State #state { monitors = pmon:erase(Pid, Mons) }
+ end);
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+terminate(_Reason, #state{}) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_pre_hibernate(State = #state { gm = GM }) ->
+ %% Since GM notifications of deaths are lazy we might not get a
+ %% timely notification of mirror death if policy changes when
+ %% everything is idle. So cause some activity just before we
+ %% sleep. This won't cause us to go into perpetual motion as the
+ %% heartbeat does not wake up coordinator or mirrors.
+ gm:broadcast(GM, hibernate_heartbeat),
+ {hibernate, State}.
+
+%% ---------------------------------------------------------------------------
+%% GM
+%% ---------------------------------------------------------------------------
+
+joined([CPid], Members) ->
+ CPid ! {joined, self(), Members},
+ ok.
+
+members_changed([_CPid], _Births, []) ->
+ ok;
+members_changed([CPid], _Births, Deaths) ->
+ ok = gen_server2:cast(CPid, {gm_deaths, Deaths}).
+
+handle_msg([CPid], _From, request_depth = Msg) ->
+ ok = gen_server2:cast(CPid, Msg);
+handle_msg([CPid], _From, {ensure_monitoring, _Pids} = Msg) ->
+ ok = gen_server2:cast(CPid, Msg);
+handle_msg([_CPid], _From, {delete_and_terminate, _Reason}) ->
+ %% We tell GM to stop, but we don't instruct the coordinator to
+ %% stop yet. The GM will first make sure all pending messages were
+ %% actually delivered. Then it calls handle_terminate/2 below so the
+ %% coordinator is stopped.
+ %%
+ %% If we stop the coordinator right now, remote mirrors could see the
+ %% coordinator DOWN before delete_and_terminate was delivered to all
+    %% GMs. One of those GMs would be promoted as the master, and this GM
+ %% would hang forever, waiting for other GMs to stop.
+ {stop, {shutdown, ring_shutdown}};
+handle_msg([_CPid], _From, _Msg) ->
+ ok.
+
+handle_terminate([CPid], Reason) ->
+ ok = gen_server2:cast(CPid, {delete_and_terminate, Reason}),
+ ok.
+
+%% ---------------------------------------------------------------------------
+%% Others
+%% ---------------------------------------------------------------------------
+
+noreply(State) ->
+ {noreply, State, hibernate}.
+
+reply(Reply, State) ->
+ {reply, Reply, State, hibernate}.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_master.erl b/deps/rabbit/src/rabbit_mirror_queue_master.erl
new file mode 100644
index 0000000000..71146e1ce2
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_master.erl
@@ -0,0 +1,578 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_master).
+
+-export([init/3, terminate/2, delete_and_terminate/2,
+ purge/1, purge_acks/1, publish/6, publish_delivered/5,
+ batch_publish/4, batch_publish_delivered/4,
+ discard/4, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3,
+ len/1, is_empty/1, depth/1, drain_confirmed/1,
+ dropwhile/2, fetchwhile/4, set_ram_duration_target/2, ram_duration/1,
+ needs_timeout/1, timeout/1, handle_pre_hibernate/1, resume/1,
+ msg_rates/1, info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ zip_msgs_and_acks/4, handle_info/2]).
+
+-export([start/2, stop/1, delete_crashed/1]).
+
+-export([promote_backing_queue_state/8, sender_death_fun/0, depth_fun/0]).
+
+-export([init_with_existing_bq/3, stop_mirroring/1, sync_mirrors/3]).
+
+-behaviour(rabbit_backing_queue).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-record(state, { name,
+ gm,
+ coordinator,
+ backing_queue,
+ backing_queue_state,
+ seen_status,
+ confirmed,
+ known_senders,
+ wait_timeout
+ }).
+
+-export_type([death_fun/0, depth_fun/0, stats_fun/0]).
+
+-type death_fun() :: fun ((pid()) -> 'ok').
+-type depth_fun() :: fun (() -> 'ok').
+-type stats_fun() :: fun ((any()) -> 'ok').
+-type master_state() :: #state { name :: rabbit_amqqueue:name(),
+ gm :: pid(),
+ coordinator :: pid(),
+ backing_queue :: atom(),
+ backing_queue_state :: any(),
+ seen_status :: map(),
+ confirmed :: [rabbit_guid:guid()],
+ known_senders :: sets:set()
+ }.
+
+%% For general documentation of HA design, see
+%% rabbit_mirror_queue_coordinator
+
+%% ---------------------------------------------------------------------------
+%% Backing queue
+%% ---------------------------------------------------------------------------
+
+-spec start(_, _) -> no_return().
+start(_Vhost, _DurableQueues) ->
+ %% This will never get called as this module will never be
+ %% installed as the default BQ implementation.
+ exit({not_valid_for_generic_backing_queue, ?MODULE}).
+
+-spec stop(_) -> no_return().
+stop(_Vhost) ->
+ %% Same as start/1.
+ exit({not_valid_for_generic_backing_queue, ?MODULE}).
+
+-spec delete_crashed(_) -> no_return().
+delete_crashed(_QName) ->
+ exit({not_valid_for_generic_backing_queue, ?MODULE}).
+
+init(Q, Recover, AsyncCallback) ->
+ {ok, BQ} = application:get_env(backing_queue_module),
+ BQS = BQ:init(Q, Recover, AsyncCallback),
+ State = #state{gm = GM} = init_with_existing_bq(Q, BQ, BQS),
+ ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}),
+ State.
+
+-spec init_with_existing_bq(amqqueue:amqqueue(), atom(), any()) ->
+ master_state().
+
+init_with_existing_bq(Q0, BQ, BQS) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ case rabbit_mirror_queue_coordinator:start_link(
+ Q0, undefined, sender_death_fun(), depth_fun()) of
+ {ok, CPid} ->
+ GM = rabbit_mirror_queue_coordinator:get_gm(CPid),
+ Self = self(),
+ Fun = fun () ->
+ [Q1] = mnesia:read({rabbit_queue, QName}),
+ true = amqqueue:is_amqqueue(Q1),
+ GMPids0 = amqqueue:get_gm_pids(Q1),
+ GMPids1 = [{GM, Self} | GMPids0],
+ Q2 = amqqueue:set_gm_pids(Q1, GMPids1),
+ Q3 = amqqueue:set_state(Q2, live),
+ %% amqqueue migration:
+ %% The amqqueue was read from this transaction, no
+ %% need to handle migration.
+ ok = rabbit_amqqueue:store_queue(Q3)
+ end,
+ ok = rabbit_misc:execute_mnesia_transaction(Fun),
+ {_MNode, SNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q0),
+ %% We need synchronous add here (i.e. do not return until the
+ %% mirror is running) so that when queue declaration is finished
+ %% all mirrors are up; we don't want to end up with unsynced mirrors
+ %% just by declaring a new queue. But add can't be synchronous all
+ %% the time as it can be called by mirrors and that's
+ %% deadlock-prone.
+ rabbit_mirror_queue_misc:add_mirrors(QName, SNodes, sync),
+ #state{name = QName,
+ gm = GM,
+ coordinator = CPid,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ seen_status = #{},
+ confirmed = [],
+ known_senders = sets:new(),
+ wait_timeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000)};
+ {error, Reason} ->
+ %% The GM can shutdown before the coordinator has started up
+ %% (lost membership or missing group), thus the start_link of
+ %% the coordinator returns {error, shutdown} as rabbit_amqqueue_process
+            %% is trapping exits.
+ throw({coordinator_not_started, Reason})
+ end.
+
+-spec stop_mirroring(master_state()) -> {atom(), any()}.
+
+stop_mirroring(State = #state { coordinator = CPid,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ unlink(CPid),
+ stop_all_slaves(shutdown, State),
+ {BQ, BQS}.
+
+-spec sync_mirrors(stats_fun(), stats_fun(), master_state()) ->
+ {'ok', master_state()} | {stop, any(), master_state()}.
+
+sync_mirrors(HandleInfo, EmitStats,
+ State = #state { name = QName,
+ gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Log = fun (Fmt, Params) ->
+ rabbit_mirror_queue_misc:log_info(
+ QName, "Synchronising: " ++ Fmt ++ "~n", Params)
+ end,
+ Log("~p messages to synchronise", [BQ:len(BQS)]),
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ SPids = amqqueue:get_slave_pids(Q),
+ SyncBatchSize = rabbit_mirror_queue_misc:sync_batch_size(Q),
+ Log("batch size: ~p", [SyncBatchSize]),
+ Ref = make_ref(),
+ Syncer = rabbit_mirror_queue_sync:master_prepare(Ref, QName, Log, SPids),
+ gm:broadcast(GM, {sync_start, Ref, Syncer, SPids}),
+ S = fun(BQSN) -> State#state{backing_queue_state = BQSN} end,
+ case rabbit_mirror_queue_sync:master_go(
+ Syncer, Ref, Log, HandleInfo, EmitStats, SyncBatchSize, BQ, BQS) of
+ {cancelled, BQS1} -> Log(" synchronisation cancelled ", []),
+ {ok, S(BQS1)};
+ {shutdown, R, BQS1} -> {stop, R, S(BQS1)};
+ {sync_died, R, BQS1} -> Log("~p", [R]),
+ {ok, S(BQS1)};
+ {already_synced, BQS1} -> {ok, S(BQS1)};
+ {ok, BQS1} -> Log("complete", []),
+ {ok, S(BQS1)}
+ end.
+
+terminate({shutdown, dropped} = Reason,
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ %% Backing queue termination - this node has been explicitly
+ %% dropped. Normally, non-durable queues would be tidied up on
+ %% startup, but there's a possibility that we will be added back
+ %% in without this node being restarted. Thus we must do the full
+ %% blown delete_and_terminate now, but only locally: we do not
+ %% broadcast delete_and_terminate.
+ State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)};
+
+terminate(Reason,
+ State = #state { name = QName,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ %% Backing queue termination. The queue is going down but
+ %% shouldn't be deleted. Most likely safe shutdown of this
+ %% node.
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ SSPids = amqqueue:get_sync_slave_pids(Q),
+ case SSPids =:= [] andalso
+ rabbit_policy:get(<<"ha-promote-on-shutdown">>, Q) =/= <<"always">> of
+ true -> %% Remove the whole queue to avoid data loss
+ rabbit_mirror_queue_misc:log_warning(
+ QName, "Stopping all nodes on master shutdown since no "
+ "synchronised mirror (replica) is available~n", []),
+ stop_all_slaves(Reason, State);
+ false -> %% Just let some other mirror take over.
+ ok
+ end,
+ State #state { backing_queue_state = BQ:terminate(Reason, BQS) }.
+
+delete_and_terminate(Reason, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ stop_all_slaves(Reason, State),
+ State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)}.
+
+stop_all_slaves(Reason, #state{name = QName, gm = GM, wait_timeout = WT}) ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ SPids = amqqueue:get_slave_pids(Q),
+ rabbit_mirror_queue_misc:stop_all_slaves(Reason, SPids, QName, GM, WT).
+
+purge(State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {drop, 0, BQ:len(BQS), false}),
+ {Count, BQS1} = BQ:purge(BQS),
+ {Count, State #state { backing_queue_state = BQS1 }}.
+
+-spec purge_acks(_) -> no_return().
+purge_acks(_State) -> exit({not_implemented, {?MODULE, purge_acks}}).
+
+publish(Msg = #basic_message { id = MsgId }, MsgProps, IsDelivered, ChPid, Flow,
+ State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ false = maps:is_key(MsgId, SS), %% ASSERTION
+ ok = gm:broadcast(GM, {publish, ChPid, Flow, MsgProps, Msg},
+ rabbit_basic:msg_size(Msg)),
+ BQS1 = BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS),
+ ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
+
+batch_publish(Publishes, ChPid, Flow,
+ State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Publishes1, false, MsgSizes} =
+ lists:foldl(fun ({Msg = #basic_message { id = MsgId },
+ MsgProps, _IsDelivered}, {Pubs, false, Sizes}) ->
+ {[{Msg, MsgProps, true} | Pubs], %% [0]
+ false = maps:is_key(MsgId, SS), %% ASSERTION
+ Sizes + rabbit_basic:msg_size(Msg)}
+ end, {[], false, 0}, Publishes),
+ Publishes2 = lists:reverse(Publishes1),
+ ok = gm:broadcast(GM, {batch_publish, ChPid, Flow, Publishes2},
+ MsgSizes),
+ BQS1 = BQ:batch_publish(Publishes2, ChPid, Flow, BQS),
+ ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
+%% [0] When the mirror process handles the publish command, it sets the
+%% IsDelivered flag to true, so to avoid iterating over the messages
+%% again at the mirror, we do it here.
+
+publish_delivered(Msg = #basic_message { id = MsgId }, MsgProps,
+ ChPid, Flow, State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ false = maps:is_key(MsgId, SS), %% ASSERTION
+ ok = gm:broadcast(GM, {publish_delivered, ChPid, Flow, MsgProps, Msg},
+ rabbit_basic:msg_size(Msg)),
+ {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, Flow, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {AckTag, ensure_monitoring(ChPid, State1)}.
+
+batch_publish_delivered(Publishes, ChPid, Flow,
+ State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {false, MsgSizes} =
+ lists:foldl(fun ({Msg = #basic_message { id = MsgId }, _MsgProps},
+ {false, Sizes}) ->
+ {false = maps:is_key(MsgId, SS), %% ASSERTION
+ Sizes + rabbit_basic:msg_size(Msg)}
+ end, {false, 0}, Publishes),
+ ok = gm:broadcast(GM, {batch_publish_delivered, ChPid, Flow, Publishes},
+ MsgSizes),
+ {AckTags, BQS1} = BQ:batch_publish_delivered(Publishes, ChPid, Flow, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {AckTags, ensure_monitoring(ChPid, State1)}.
+
+discard(MsgId, ChPid, Flow, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ seen_status = SS }) ->
+ false = maps:is_key(MsgId, SS), %% ASSERTION
+ ok = gm:broadcast(GM, {discard, ChPid, Flow, MsgId}),
+ ensure_monitoring(ChPid,
+ State #state { backing_queue_state =
+ BQ:discard(MsgId, ChPid, Flow, BQS) }).
+
+dropwhile(Pred, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Len = BQ:len(BQS),
+ {Next, BQS1} = BQ:dropwhile(Pred, BQS),
+ {Next, drop(Len, false, State #state { backing_queue_state = BQS1 })}.
+
+fetchwhile(Pred, Fun, Acc, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Len = BQ:len(BQS),
+ {Next, Acc1, BQS1} = BQ:fetchwhile(Pred, Fun, Acc, BQS),
+ {Next, Acc1, drop(Len, true, State #state { backing_queue_state = BQS1 })}.
+
+drain_confirmed(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS,
+ seen_status = SS,
+ confirmed = Confirmed }) ->
+ {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
+ {MsgIds1, SS1} =
+ lists:foldl(
+ fun (MsgId, {MsgIdsN, SSN}) ->
+ %% We will never see 'discarded' here
+ case maps:find(MsgId, SSN) of
+ error ->
+ {[MsgId | MsgIdsN], SSN};
+ {ok, published} ->
+ %% It was published when we were a mirror,
+ %% and we were promoted before we saw the
+ %% publish from the channel. We still
+ %% haven't seen the channel publish, and
+ %% consequently we need to filter out the
+ %% confirm here. We will issue the confirm
+ %% when we see the publish from the channel.
+ {MsgIdsN, maps:put(MsgId, confirmed, SSN)};
+ {ok, confirmed} ->
+ %% Well, confirms are racy by definition.
+ {[MsgId | MsgIdsN], SSN}
+ end
+ end, {[], SS}, MsgIds),
+ {Confirmed ++ MsgIds1, State #state { backing_queue_state = BQS1,
+ seen_status = SS1,
+ confirmed = [] }}.
+
+fetch(AckRequired, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:fetch(AckRequired, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {Result, case Result of
+ empty -> State1;
+ {_MsgId, _IsDelivered, _AckTag} -> drop_one(AckRequired, State1)
+ end}.
+
+drop(AckRequired, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:drop(AckRequired, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {Result, case Result of
+ empty -> State1;
+ {_MsgId, _AckTag} -> drop_one(AckRequired, State1)
+ end}.
+
+ack(AckTags, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {MsgIds, BQS1} = BQ:ack(AckTags, BQS),
+ case MsgIds of
+ [] -> ok;
+ _ -> ok = gm:broadcast(GM, {ack, MsgIds})
+ end,
+ {MsgIds, State #state { backing_queue_state = BQS1 }}.
+
+requeue(AckTags, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ ok = gm:broadcast(GM, {requeue, MsgIds}),
+ {MsgIds, State #state { backing_queue_state = BQS1 }}.
+
+ackfold(MsgFun, Acc, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }, AckTags) ->
+ {Acc1, BQS1} = BQ:ackfold(MsgFun, Acc, BQS, AckTags),
+ {Acc1, State #state { backing_queue_state = BQS1 }}.
+
+fold(Fun, Acc, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:fold(Fun, Acc, BQS),
+ {Result, State #state { backing_queue_state = BQS1 }}.
+
+len(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:len(BQS).
+
+is_empty(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:is_empty(BQS).
+
+depth(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:depth(BQS).
+
+set_ram_duration_target(Target, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state =
+ BQ:set_ram_duration_target(Target, BQS) }.
+
+ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:ram_duration(BQS),
+ {Result, State #state { backing_queue_state = BQS1 }}.
+
+needs_timeout(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:needs_timeout(BQS).
+
+timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:timeout(BQS) }.
+
+handle_pre_hibernate(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:handle_pre_hibernate(BQS) }.
+
+handle_info(Msg, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:handle_info(Msg, BQS) }.
+
+resume(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:resume(BQS) }.
+
+msg_rates(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:msg_rates(BQS).
+
+info(backing_queue_status,
+ State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:info(backing_queue_status, BQS) ++
+ [ {mirror_seen, maps:size(State #state.seen_status)},
+ {mirror_senders, sets:size(State #state.known_senders)} ];
+info(Item, #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:info(Item, BQS).
+
+invoke(?MODULE, Fun, State) ->
+ Fun(?MODULE, State);
+invoke(Mod, Fun, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }.
+
+is_duplicate(Message = #basic_message { id = MsgId },
+ State = #state { seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ confirmed = Confirmed }) ->
+ %% Here, we need to deal with the possibility that we're about to
+ %% receive a message that we've already seen when we were a mirror
+    %% (we received it via gm). Thus if we do receive such a message now
+ %% via the channel, there may be a confirm waiting to issue for
+ %% it.
+
+ %% We will never see {published, ChPid, MsgSeqNo} here.
+ case maps:find(MsgId, SS) of
+ error ->
+ %% We permit the underlying BQ to have a peek at it, but
+ %% only if we ourselves are not filtering out the msg.
+ {Result, BQS1} = BQ:is_duplicate(Message, BQS),
+ {Result, State #state { backing_queue_state = BQS1 }};
+ {ok, published} ->
+ %% It already got published when we were a mirror and no
+ %% confirmation is waiting. amqqueue_process will have, in
+ %% its msg_id_to_channel mapping, the entry for dealing
+ %% with the confirm when that comes back in (it's added
+ %% immediately after calling is_duplicate). The msg is
+ %% invalid. We will not see this again, nor will we be
+ %% further involved in confirming this message, so erase.
+ {{true, drop}, State #state { seen_status = maps:remove(MsgId, SS) }};
+ {ok, Disposition}
+ when Disposition =:= confirmed
+ %% It got published when we were a mirror via gm, and
+ %% confirmed some time after that (maybe even after
+ %% promotion), but before we received the publish from the
+ %% channel, so couldn't previously know what the
+ %% msg_seq_no was (and thus confirm as a mirror). So we
+ %% need to confirm now. As above, amqqueue_process will
+ %% have the entry for the msg_id_to_channel mapping added
+ %% immediately after calling is_duplicate/2.
+ orelse Disposition =:= discarded ->
+ %% Message was discarded while we were a mirror. Confirm now.
+ %% As above, amqqueue_process will have the entry for the
+ %% msg_id_to_channel mapping.
+ {{true, drop}, State #state { seen_status = maps:remove(MsgId, SS),
+ confirmed = [MsgId | Confirmed] }}
+ end.
+
+set_queue_mode(Mode, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {set_queue_mode, Mode}),
+ BQS1 = BQ:set_queue_mode(Mode, BQS),
+ State #state { backing_queue_state = BQS1 }.
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator,
+ #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQ:zip_msgs_and_acks(Msgs, AckTags, Accumulator, BQS).
+
+%% ---------------------------------------------------------------------------
+%% Other exported functions
+%% ---------------------------------------------------------------------------
+
+-spec promote_backing_queue_state
+ (rabbit_amqqueue:name(), pid(), atom(), any(), pid(), [any()],
+ map(), [pid()]) ->
+ master_state().
+
+promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) ->
+ {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ Len = BQ:len(BQS1),
+ Depth = BQ:depth(BQS1),
+ true = Len == Depth, %% ASSERTION: everything must have been requeued
+ ok = gm:broadcast(GM, {depth, Depth}),
+ WaitTimeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000),
+ #state { name = QName,
+ gm = GM,
+ coordinator = CPid,
+ backing_queue = BQ,
+ backing_queue_state = BQS1,
+ seen_status = Seen,
+ confirmed = [],
+ known_senders = sets:from_list(KS),
+ wait_timeout = WaitTimeout }.
+
+-spec sender_death_fun() -> death_fun().
+
+sender_death_fun() ->
+ Self = self(),
+ fun (DeadPid) ->
+ rabbit_amqqueue:run_backing_queue(
+ Self, ?MODULE,
+ fun (?MODULE, State = #state { gm = GM, known_senders = KS }) ->
+ ok = gm:broadcast(GM, {sender_death, DeadPid}),
+ KS1 = sets:del_element(DeadPid, KS),
+ State #state { known_senders = KS1 }
+ end)
+ end.
+
+-spec depth_fun() -> depth_fun().
+
+depth_fun() ->
+ Self = self(),
+ fun () ->
+ rabbit_amqqueue:run_backing_queue(
+ Self, ?MODULE,
+ fun (?MODULE, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}),
+ State
+ end)
+ end.
+
+%% ---------------------------------------------------------------------------
+%% Helpers
+%% ---------------------------------------------------------------------------
+
+drop_one(AckRequired, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckRequired}),
+ State.
+
+drop(PrevLen, AckRequired, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Len = BQ:len(BQS),
+ case PrevLen - Len of
+ 0 -> State;
+ Dropped -> ok = gm:broadcast(GM, {drop, Len, Dropped, AckRequired}),
+ State
+ end.
+
+ensure_monitoring(ChPid, State = #state { coordinator = CPid,
+ known_senders = KS }) ->
+ case sets:is_element(ChPid, KS) of
+ true -> State;
+ false -> ok = rabbit_mirror_queue_coordinator:ensure_monitoring(
+ CPid, [ChPid]),
+ State #state { known_senders = sets:add_element(ChPid, KS) }
+ end.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_misc.erl b/deps/rabbit/src/rabbit_mirror_queue_misc.erl
new file mode 100644
index 0000000000..02f590e2fb
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_misc.erl
@@ -0,0 +1,680 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_misc).
+-behaviour(rabbit_policy_validator).
+
+-export([remove_from_queue/3, on_vhost_up/1, add_mirrors/3,
+ report_deaths/4, store_updated_slaves/1,
+ initial_queue_node/2, suggested_queue_nodes/1, actual_queue_nodes/1,
+ is_mirrored/1, is_mirrored_ha_nodes/1,
+ update_mirrors/2, update_mirrors/1, validate_policy/1,
+ maybe_auto_sync/1, maybe_drop_master_after_sync/1,
+ sync_batch_size/1, log_info/3, log_warning/3]).
+-export([stop_all_slaves/5]).
+
+-export([sync_queue/1, cancel_sync_queue/1]).
+
+-export([transfer_leadership/2, queue_length/1, get_replicas/1]).
+
+%% for testing only
+-export([module/1]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-define(HA_NODES_MODULE, rabbit_mirror_queue_mode_nodes).
+
+-rabbit_boot_step(
+ {?MODULE,
+ [{description, "HA policy validation"},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-mode">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-params">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-sync-mode">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-sync-batch-size">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-promote-on-shutdown">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-promote-on-failure">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+
+%%----------------------------------------------------------------------------
+
+%% Returns {ok, NewMPid, DeadPids, ExtraNodes}
+
+-spec remove_from_queue
+ (rabbit_amqqueue:name(), pid(), [pid()]) ->
+ {'ok', pid(), [pid()], [node()]} | {'error', 'not_found'} |
+ {'error', {'not_synced', [pid()]}}.
+
+remove_from_queue(QueueName, Self, DeadGMPids) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ %% Someone else could have deleted the queue before we
+ %% get here. Or, gm group could've altered. see rabbitmq-server#914
+ case mnesia:read({rabbit_queue, QueueName}) of
+ [] -> {error, not_found};
+ [Q0] when ?is_amqqueue(Q0) ->
+ QPid = amqqueue:get_pid(Q0),
+ SPids = amqqueue:get_slave_pids(Q0),
+ SyncSPids = amqqueue:get_sync_slave_pids(Q0),
+ GMPids = amqqueue:get_gm_pids(Q0),
+ {DeadGM, AliveGM} = lists:partition(
+ fun ({GM, _}) ->
+ lists:member(GM, DeadGMPids)
+ end, GMPids),
+ DeadPids = [Pid || {_GM, Pid} <- DeadGM],
+ AlivePids = [Pid || {_GM, Pid} <- AliveGM],
+ Alive = [Pid || Pid <- [QPid | SPids],
+ lists:member(Pid, AlivePids)],
+ {QPid1, SPids1} = case Alive of
+ [] ->
+ %% GM altered, & if all pids are
+ %% perceived as dead, rather do
+                                             %% nothing here, & trust the
+ %% promoted mirror to have updated
+ %% mnesia during the alteration.
+ {QPid, SPids};
+ _ -> promote_slave(Alive)
+ end,
+ DoNotPromote = SyncSPids =:= [] andalso
+ rabbit_policy:get(<<"ha-promote-on-failure">>, Q0) =:= <<"when-synced">>,
+ case {{QPid, SPids}, {QPid1, SPids1}} of
+ {Same, Same} ->
+ {ok, QPid1, DeadPids, []};
+ _ when QPid1 =/= QPid andalso QPid1 =:= Self andalso DoNotPromote =:= true ->
+ %% We have been promoted to master
+ %% but there are no synchronised mirrors
+ %% hence this node is not synchronised either
+ %% Bailing out.
+ {error, {not_synced, SPids1}};
+ _ when QPid =:= QPid1 orelse QPid1 =:= Self ->
+ %% Either master hasn't changed, so
+ %% we're ok to update mnesia; or we have
+ %% become the master. If gm altered,
+ %% we have no choice but to proceed.
+ Q1 = amqqueue:set_pid(Q0, QPid1),
+ Q2 = amqqueue:set_slave_pids(Q1, SPids1),
+ Q3 = amqqueue:set_gm_pids(Q2, AliveGM),
+ store_updated_slaves(Q3),
+ %% If we add and remove nodes at the
+ %% same time we might tell the old
+ %% master we need to sync and then
+ %% shut it down. So let's check if
+ %% the new master needs to sync.
+ maybe_auto_sync(Q3),
+ {ok, QPid1, DeadPids, slaves_to_start_on_failure(Q3, DeadGMPids)};
+ _ ->
+ %% Master has changed, and we're not it.
+ %% [1].
+ Q1 = amqqueue:set_slave_pids(Q0, Alive),
+ Q2 = amqqueue:set_gm_pids(Q1, AliveGM),
+ store_updated_slaves(Q2),
+ {ok, QPid1, DeadPids, []}
+ end
+ end
+ end).
+%% [1] We still update mnesia here in case the mirror that is supposed
+%% to become master dies before it does do so, in which case the dead
+%% old master might otherwise never get removed, which in turn might
+%% prevent promotion of another mirror (e.g. us).
+%%
+%% Note however that we do not update the master pid. Otherwise we can
+%% have the situation where a mirror updates the mnesia record for a
+%% queue, promoting another mirror before that mirror realises it has
+%% become the new master, which is bad because it could then mean the
+%% mirror (now master) receives messages it's not ready for (for
+%% example, new consumers).
+%%
+%% We set slave_pids to Alive rather than SPids1 since otherwise we'd
+%% be removing the pid of the candidate master, which in turn would
+%% prevent it from promoting itself.
+%%
+%% We maintain gm_pids as our source of truth, i.e. it contains the
+%% most up-to-date information about which GMs and associated
+%% {M,S}Pids are alive. And all pids in slave_pids always have a
+%% corresponding entry in gm_pids. By contrast, due to the
+%% aforementioned restriction on updating the master pid, that pid may
+%% not be present in gm_pids, but only if said master has died.
+
+%% Sometimes a mirror dying means we need to start more on other
+%% nodes - "exactly" mode can cause this to happen.
+slaves_to_start_on_failure(Q, DeadGMPids) ->
+ %% In case Mnesia has not caught up yet, filter out nodes we know
+    %% to be dead.
+ ClusterNodes = rabbit_nodes:all_running() --
+ [node(P) || P <- DeadGMPids],
+ {_, OldNodes, _} = actual_queue_nodes(Q),
+ {_, NewNodes} = suggested_queue_nodes(Q, ClusterNodes),
+ NewNodes -- OldNodes.
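+
+%% Worked example (hypothetical nodes, for illustration only): a queue with
+%% an "exactly 2" policy has its master on node a and loses its mirror on
+%% node b. suggested_queue_nodes/2 then proposes one replacement node, say c,
+%% so NewNodes -- OldNodes = [c] and a new mirror is started there.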
+
+on_vhost_up(VHost) ->
+ QNames =
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ mnesia:foldl(
+ fun
+ (Q, QNames0) when not ?amqqueue_vhost_equals(Q, VHost) ->
+ QNames0;
+ (Q, QNames0) when ?amqqueue_is_classic(Q) ->
+ QName = amqqueue:get_name(Q),
+ Pid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ %% We don't want to pass in the whole
+ %% cluster - we don't want a situation
+ %% where starting one node causes us to
+ %% decide to start a mirror on another
+ PossibleNodes0 = [node(P) || P <- [Pid | SPids]],
+ PossibleNodes =
+ case lists:member(node(), PossibleNodes0) of
+ true -> PossibleNodes0;
+ false -> [node() | PossibleNodes0]
+ end,
+ {_MNode, SNodes} = suggested_queue_nodes(
+ Q, PossibleNodes),
+ case lists:member(node(), SNodes) of
+ true -> [QName | QNames0];
+ false -> QNames0
+ end;
+ (_, QNames0) ->
+ QNames0
+ end, [], rabbit_queue)
+ end),
+ [add_mirror(QName, node(), async) || QName <- QNames],
+ ok.
+
+drop_mirrors(QName, Nodes) ->
+ [drop_mirror(QName, Node) || Node <- Nodes],
+ ok.
+
+drop_mirror(QName, MirrorNode) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?is_amqqueue(Q) ->
+ Name = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ case [Pid || Pid <- [QPid | SPids], node(Pid) =:= MirrorNode] of
+ [] ->
+ {error, {queue_not_mirrored_on_node, MirrorNode}};
+ [QPid] when SPids =:= [] ->
+ {error, cannot_drop_only_mirror};
+ [Pid] ->
+ log_info(Name, "Dropping queue mirror on node ~p~n",
+ [MirrorNode]),
+ exit(Pid, {shutdown, dropped}),
+ {ok, dropped}
+ end;
+ {error, not_found} = E ->
+ E
+ end.
+
+-spec add_mirrors(rabbit_amqqueue:name(), [node()], 'sync' | 'async') ->
+ 'ok'.
+
+add_mirrors(QName, Nodes, SyncMode) ->
+ [add_mirror(QName, Node, SyncMode) || Node <- Nodes],
+ ok.
+
+add_mirror(QName, MirrorNode, SyncMode) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ rabbit_misc:with_exit_handler(
+ rabbit_misc:const(ok),
+ fun () ->
+ #resource{virtual_host = VHost} = amqqueue:get_name(Q),
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost, MirrorNode) of
+ {ok, _} ->
+ try
+ SPid = rabbit_amqqueue_sup_sup:start_queue_process(
+ MirrorNode, Q, slave),
+ log_info(QName, "Adding mirror on node ~p: ~p~n",
+ [MirrorNode, SPid]),
+ rabbit_mirror_queue_slave:go(SPid, SyncMode)
+ of
+ _ -> ok
+ catch
+ error:QError ->
+ log_warning(QName,
+ "Unable to start queue mirror on node '~p'. "
+ "Target queue supervisor is not running: ~p~n",
+ [MirrorNode, QError])
+ end;
+ {error, Error} ->
+ log_warning(QName,
+ "Unable to start queue mirror on node '~p'. "
+ "Target virtual host is not running: ~p~n",
+ [MirrorNode, Error]),
+ ok
+ end
+ end);
+ {error, not_found} = E ->
+ E
+ end.
+
+report_deaths(_MirrorPid, _IsMaster, _QueueName, []) ->
+ ok;
+report_deaths(MirrorPid, IsMaster, QueueName, DeadPids) ->
+ log_info(QueueName, "~s ~s saw deaths of mirrors~s~n",
+ [case IsMaster of
+ true -> "Master";
+ false -> "Slave"
+ end,
+ rabbit_misc:pid_to_string(MirrorPid),
+ [[$ , rabbit_misc:pid_to_string(P)] || P <- DeadPids]]).
+
+-spec log_info(rabbit_amqqueue:name(), string(), [any()]) -> 'ok'.
+
+log_info(QName, Fmt, Args) ->
+ rabbit_log_mirroring:info("Mirrored ~s: " ++ Fmt,
+ [rabbit_misc:rs(QName) | Args]).
+
+-spec log_warning(rabbit_amqqueue:name(), string(), [any()]) -> 'ok'.
+
+log_warning(QName, Fmt, Args) ->
+ rabbit_log_mirroring:warning("Mirrored ~s: " ++ Fmt,
+ [rabbit_misc:rs(QName) | Args]).
+
+-spec store_updated_slaves(amqqueue:amqqueue()) ->
+ amqqueue:amqqueue().
+
+store_updated_slaves(Q0) when ?is_amqqueue(Q0) ->
+ SPids = amqqueue:get_slave_pids(Q0),
+ SSPids = amqqueue:get_sync_slave_pids(Q0),
+ RS0 = amqqueue:get_recoverable_slaves(Q0),
+ %% TODO now that we clear sync_slave_pids in rabbit_durable_queue,
+ %% do we still need this filtering?
+ SSPids1 = [SSPid || SSPid <- SSPids, lists:member(SSPid, SPids)],
+ Q1 = amqqueue:set_sync_slave_pids(Q0, SSPids1),
+ RS1 = update_recoverable(SPids, RS0),
+ Q2 = amqqueue:set_recoverable_slaves(Q1, RS1),
+ Q3 = amqqueue:set_state(Q2, live),
+ %% amqqueue migration:
+ %% The amqqueue was read from this transaction, no need to handle
+ %% migration.
+ ok = rabbit_amqqueue:store_queue(Q3),
+ %% Wake it up so that we emit a stats event
+ rabbit_amqqueue:notify_policy_changed(Q3),
+ Q3.
+
+%% Recoverable nodes are those which we could promote if the whole
+%% cluster were to suddenly stop and we then lose the master; i.e. all
+%% nodes with running mirrors, and all stopped nodes which had running
+%% mirrors when they were up.
+%%
+%% Therefore we aim here to add new nodes with mirrors, and remove
+%% running nodes without mirrors. We also try to keep the order
+%% constant, and similar to the live SPids field (i.e. oldest
+%% first). That's not necessarily optimal if nodes spend a long time
+%% down, but we don't have a good way to predict what the optimal is
+%% in that case anyway, and we assume nodes will not just be down for
+%% a long time without being removed.
+update_recoverable(SPids, RS) ->
+ SNodes = [node(SPid) || SPid <- SPids],
+ RunningNodes = rabbit_nodes:all_running(),
+ AddNodes = SNodes -- RS,
+ DelNodes = RunningNodes -- SNodes, %% i.e. running with no slave
+ (RS -- DelNodes) ++ AddNodes.
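+
+%% Worked example (hypothetical node names, illustrating the list arithmetic
+%% above): mirrors running on nodes a and b, RS = [a, c], running nodes
+%% [a, b, c]:
+%%   AddNodes = [a, b] -- [a, c]        = [b]
+%%   DelNodes = [a, b, c] -- [a, b]     = [c]
+%%   Result   = ([a, c] -- [c]) ++ [b]  = [a, b]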
+
+stop_all_slaves(Reason, SPids, QName, GM, WaitTimeout) ->
+ PidsMRefs = [{Pid, erlang:monitor(process, Pid)} || Pid <- [GM | SPids]],
+ ok = gm:broadcast(GM, {delete_and_terminate, Reason}),
+ %% It's possible that we could be partitioned from some mirrors
+ %% between the lookup and the broadcast, in which case we could
+ %% monitor them but they would not have received the GM
+ %% message. So only wait for mirrors which are still
+ %% not-partitioned.
+ PendingSlavePids = lists:foldl(fun({Pid, MRef}, Acc) ->
+ case rabbit_mnesia:on_running_node(Pid) of
+ true ->
+ receive
+ {'DOWN', MRef, process, _Pid, _Info} ->
+ Acc
+ after WaitTimeout ->
+ rabbit_mirror_queue_misc:log_warning(
+ QName, "Missing 'DOWN' message from ~p in"
+ " node ~p~n", [Pid, node(Pid)]),
+ [Pid | Acc]
+ end;
+ false ->
+ Acc
+ end
+ end, [], PidsMRefs),
+ %% Normally when we remove a mirror another mirror or master will
+ %% notice and update Mnesia. But we just removed them all, and
+ %% have stopped listening ourselves. So manually clean up.
+ rabbit_misc:execute_mnesia_transaction(fun () ->
+ [Q0] = mnesia:read({rabbit_queue, QName}),
+ Q1 = amqqueue:set_gm_pids(Q0, []),
+ Q2 = amqqueue:set_slave_pids(Q1, []),
+ %% Restarted mirrors on running nodes can
+ %% ensure old incarnations are stopped using
+ %% the pending mirror pids.
+ Q3 = amqqueue:set_slave_pids_pending_shutdown(Q2, PendingSlavePids),
+ rabbit_mirror_queue_misc:store_updated_slaves(Q3)
+ end),
+ ok = gm:forget_group(QName).
+
+%%----------------------------------------------------------------------------
+
+promote_slave([SPid | SPids]) ->
+ %% The mirror pids are maintained in descending order of age, so
+ %% the one to promote is the oldest.
+ {SPid, SPids}.
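+
+%% Example: promote_slave([Pid1, Pid2, Pid3]) returns {Pid1, [Pid2, Pid3]}:
+%% the head of the list (the oldest mirror) becomes the new master and the
+%% remaining pids stay as mirrors.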
+
+-spec initial_queue_node(amqqueue:amqqueue(), node()) -> node().
+
+initial_queue_node(Q, DefNode) ->
+ {MNode, _SNodes} = suggested_queue_nodes(Q, DefNode, rabbit_nodes:all_running()),
+ MNode.
+
+-spec suggested_queue_nodes(amqqueue:amqqueue()) ->
+ {node(), [node()]}.
+
+suggested_queue_nodes(Q) -> suggested_queue_nodes(Q, rabbit_nodes:all_running()).
+suggested_queue_nodes(Q, All) -> suggested_queue_nodes(Q, node(), All).
+
+%% The third argument exists so we can pull a call to
+%% rabbit_nodes:all_running() out of a loop or transaction
+%% or both.
+suggested_queue_nodes(Q, DefNode, All) when ?is_amqqueue(Q) ->
+ Owner = amqqueue:get_exclusive_owner(Q),
+ {MNode0, SNodes, SSNodes} = actual_queue_nodes(Q),
+ MNode = case MNode0 of
+ none -> DefNode;
+ _ -> MNode0
+ end,
+ case Owner of
+ none -> Params = policy(<<"ha-params">>, Q),
+ case module(Q) of
+ {ok, M} -> M:suggested_queue_nodes(
+ Params, MNode, SNodes, SSNodes, All);
+ _ -> {MNode, []}
+ end;
+ _ -> {MNode, []}
+ end.
+
+policy(Policy, Q) ->
+ case rabbit_policy:get(Policy, Q) of
+ undefined -> none;
+ P -> P
+ end.
+
+module(Q) when ?is_amqqueue(Q) ->
+ case rabbit_policy:get(<<"ha-mode">>, Q) of
+ undefined -> not_mirrored;
+ Mode -> module(Mode)
+ end;
+
+module(Mode) when is_binary(Mode) ->
+ case rabbit_registry:binary_to_type(Mode) of
+ {error, not_found} -> not_mirrored;
+ T -> case rabbit_registry:lookup_module(ha_mode, T) of
+ {ok, Module} -> {ok, Module};
+ _ -> not_mirrored
+ end
+ end.
+
+validate_mode(Mode) ->
+ case module(Mode) of
+ {ok, _Module} ->
+ ok;
+ not_mirrored ->
+ {error, "~p is not a valid ha-mode value", [Mode]}
+ end.
+
+-spec is_mirrored(amqqueue:amqqueue()) -> boolean().
+
+is_mirrored(Q) ->
+ case module(Q) of
+ {ok, _} -> true;
+ _ -> false
+ end.
+
+is_mirrored_ha_nodes(Q) ->
+ case module(Q) of
+ {ok, ?HA_NODES_MODULE} -> true;
+ _ -> false
+ end.
+
+actual_queue_nodes(Q) when ?is_amqqueue(Q) ->
+ MPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ SSPids = amqqueue:get_sync_slave_pids(Q),
+ Nodes = fun (L) -> [node(Pid) || Pid <- L] end,
+ {case MPid of
+ none -> none;
+ _ -> node(MPid)
+ end, Nodes(SPids), Nodes(SSPids)}.
+
+-spec maybe_auto_sync(amqqueue:amqqueue()) -> 'ok'.
+
+maybe_auto_sync(Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case policy(<<"ha-sync-mode">>, Q) of
+ <<"automatic">> ->
+ spawn(fun() -> rabbit_amqqueue:sync_mirrors(QPid) end);
+ _ ->
+ ok
+ end.
+
+sync_queue(Q0) ->
+ F = fun
+ (Q) when ?amqqueue_is_classic(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ rabbit_amqqueue:sync_mirrors(QPid);
+ (Q) when ?amqqueue_is_quorum(Q) ->
+ {error, quorum_queue_not_supported}
+ end,
+ rabbit_amqqueue:with(Q0, F).
+
+cancel_sync_queue(Q0) ->
+ F = fun
+ (Q) when ?amqqueue_is_classic(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ rabbit_amqqueue:cancel_sync_mirrors(QPid);
+ (Q) when ?amqqueue_is_quorum(Q) ->
+ {error, quorum_queue_not_supported}
+ end,
+ rabbit_amqqueue:with(Q0, F).
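+
+%% Illustrative CLI usage (a sketch; the exact command wiring lives in the
+%% CLI tooling, and the queue name below is a placeholder):
+%%
+%%   rabbitmqctl sync_queue my-queue
+%%   rabbitmqctl cancel_sync_queue my-queue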
+
+sync_batch_size(Q) when ?is_amqqueue(Q) ->
+ case policy(<<"ha-sync-batch-size">>, Q) of
+ none -> %% we need this case because none > 1 == true
+ default_batch_size();
+ BatchSize when BatchSize > 1 ->
+ BatchSize;
+ _ ->
+ default_batch_size()
+ end.
+
+-define(DEFAULT_BATCH_SIZE, 4096).
+
+default_batch_size() ->
+ rabbit_misc:get_env(rabbit, mirroring_sync_batch_size,
+ ?DEFAULT_BATCH_SIZE).
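+
+%% Illustrative policy (a sketch, not part of this module): the batch size
+%% and sync mode are normally driven by an HA policy, e.g. something along
+%% these lines, where the policy name, pattern and values are placeholders:
+%%
+%%   rabbitmqctl set_policy ha-two "^two\." \
+%%     '{"ha-mode":"exactly","ha-params":2,"ha-sync-mode":"automatic","ha-sync-batch-size":1000}'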
+
+-spec update_mirrors
+ (amqqueue:amqqueue(), amqqueue:amqqueue()) -> 'ok'.
+
+update_mirrors(OldQ, NewQ) when ?amqqueue_pids_are_equal(OldQ, NewQ) ->
+ % Note: we do want to ensure both queues have same pid
+ QPid = amqqueue:get_pid(OldQ),
+ QPid = amqqueue:get_pid(NewQ),
+ case {is_mirrored(OldQ), is_mirrored(NewQ)} of
+ {false, false} -> ok;
+ _ -> rabbit_amqqueue:update_mirroring(QPid)
+ end.
+
+-spec update_mirrors
+ (amqqueue:amqqueue()) -> 'ok'.
+
+update_mirrors(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ {OldMNode, OldSNodes, _} = actual_queue_nodes(Q),
+ {NewMNode, NewSNodes} = suggested_queue_nodes(Q),
+ OldNodes = [OldMNode | OldSNodes],
+ NewNodes = [NewMNode | NewSNodes],
+ %% When a mirror dies, remove_from_queue/2 might have to add new
+ %% mirrors (in "exactly" mode). It will check mnesia to see which
+ %% mirrors there currently are. If drop_mirror/2 is invoked first
+ %% then when we end up in remove_from_queue/2 it will not see the
+ %% mirrors that add_mirror/2 will add, and also want to add them
+ %% (even though we are not responding to the death of a
+ %% mirror). Breakage ensues.
+ add_mirrors (QName, NewNodes -- OldNodes, async),
+ drop_mirrors(QName, OldNodes -- NewNodes),
+ %% This is for the case where no extra nodes were added but we changed to
+ %% a policy requiring auto-sync.
+ maybe_auto_sync(Q),
+ ok.
+
+queue_length(Q) ->
+ [{messages, M}] = rabbit_amqqueue:info(Q, [messages]),
+ M.
+
+get_replicas(Q) ->
+ {MNode, SNodes} = suggested_queue_nodes(Q),
+ [MNode] ++ SNodes.
+
+transfer_leadership(Q, Destination) ->
+ QName = amqqueue:get_name(Q),
+ {OldMNode, OldSNodes, _} = actual_queue_nodes(Q),
+ OldNodes = [OldMNode | OldSNodes],
+ add_mirrors(QName, [Destination] -- OldNodes, async),
+ drop_mirrors(QName, OldNodes -- [Destination]),
+ {Result, NewQ} = wait_for_new_master(QName, Destination),
+ update_mirrors(NewQ),
+ Result.
+
+wait_for_new_master(QName, Destination) ->
+ wait_for_new_master(QName, Destination, 100).
+
+wait_for_new_master(QName, _, 0) ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ {{not_migrated, ""}, Q};
+wait_for_new_master(QName, Destination, N) ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ case amqqueue:get_pid(Q) of
+ none ->
+ timer:sleep(100),
+ wait_for_new_master(QName, Destination, N - 1);
+ Pid ->
+ case node(Pid) of
+ Destination ->
+ {{migrated, Destination}, Q};
+ _ ->
+ timer:sleep(100),
+ wait_for_new_master(QName, Destination, N - 1)
+ end
+ end.
+
+%% The arrival of a newly synced mirror may cause the master to die if
+%% the policy does not want the master but it has been kept alive
+%% because there were no synced mirrors.
+%%
+%% We don't just call update_mirrors/2 here since that could decide to
+%% start a mirror for some other reason, and since we are the mirror ATM
+%% that allows complicated deadlocks.
+
+-spec maybe_drop_master_after_sync(amqqueue:amqqueue()) -> 'ok'.
+
+maybe_drop_master_after_sync(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ MPid = amqqueue:get_pid(Q),
+ {DesiredMNode, DesiredSNodes} = suggested_queue_nodes(Q),
+ case node(MPid) of
+ DesiredMNode -> ok;
+ OldMNode -> false = lists:member(OldMNode, DesiredSNodes), %% [0]
+ drop_mirror(QName, OldMNode)
+ end,
+ ok.
+%% [0] ASSERTION - if the policy wants the master to change, it has
+%% not just shuffled it into the mirrors. All our modes ensure this
+%% does not happen, but we should guard against a misbehaving plugin.
+
+%%----------------------------------------------------------------------------
+
+validate_policy(KeyList) ->
+ Mode = proplists:get_value(<<"ha-mode">>, KeyList, none),
+ Params = proplists:get_value(<<"ha-params">>, KeyList, none),
+ SyncMode = proplists:get_value(<<"ha-sync-mode">>, KeyList, none),
+ SyncBatchSize = proplists:get_value(
+ <<"ha-sync-batch-size">>, KeyList, none),
+ PromoteOnShutdown = proplists:get_value(
+ <<"ha-promote-on-shutdown">>, KeyList, none),
+ PromoteOnFailure = proplists:get_value(
+ <<"ha-promote-on-failure">>, KeyList, none),
+ case {Mode, Params, SyncMode, SyncBatchSize, PromoteOnShutdown, PromoteOnFailure} of
+ {none, none, none, none, none, none} ->
+ ok;
+ {none, _, _, _, _, _} ->
+ {error, "ha-mode must be specified to specify ha-params, "
+ "ha-sync-mode or ha-promote-on-shutdown", []};
+ _ ->
+ validate_policies(
+ [{Mode, fun validate_mode/1},
+ {Params, ha_params_validator(Mode)},
+ {SyncMode, fun validate_sync_mode/1},
+ {SyncBatchSize, fun validate_sync_batch_size/1},
+ {PromoteOnShutdown, fun validate_pos/1},
+ {PromoteOnFailure, fun validate_pof/1}])
+ end.
+
+ha_params_validator(Mode) ->
+ fun(Val) ->
+ {ok, M} = module(Mode),
+ M:validate_policy(Val)
+ end.
+
+validate_policies([]) ->
+ ok;
+validate_policies([{Val, Validator} | Rest]) ->
+ case Validator(Val) of
+ ok -> validate_policies(Rest);
+ E -> E
+ end.
+
+validate_sync_mode(SyncMode) ->
+ case SyncMode of
+ <<"automatic">> -> ok;
+ <<"manual">> -> ok;
+ none -> ok;
+ Mode -> {error, "ha-sync-mode must be \"manual\" "
+ "or \"automatic\", got ~p", [Mode]}
+ end.
+
+validate_sync_batch_size(none) ->
+ ok;
+validate_sync_batch_size(N) when is_integer(N) andalso N > 0 ->
+ ok;
+validate_sync_batch_size(N) ->
+ {error, "ha-sync-batch-size takes an integer greater than 0, "
+ "~p given", [N]}.
+
+validate_pos(PromoteOnShutdown) ->
+ case PromoteOnShutdown of
+ <<"always">> -> ok;
+ <<"when-synced">> -> ok;
+ none -> ok;
+ Mode -> {error, "ha-promote-on-shutdown must be "
+ "\"always\" or \"when-synced\", got ~p", [Mode]}
+ end.
+
+validate_pof(PromoteOnFailure) ->
+    case PromoteOnFailure of
+ <<"always">> -> ok;
+ <<"when-synced">> -> ok;
+ none -> ok;
+ Mode -> {error, "ha-promote-on-failure must be "
+ "\"always\" or \"when-synced\", got ~p", [Mode]}
+ end.
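+
+%% Worked examples (illustrative; the first assumes the "exactly" mode module
+%% has been registered with rabbit_registry at boot):
+%%
+%%   validate_policy([{<<"ha-mode">>,      <<"exactly">>},
+%%                    {<<"ha-params">>,    2},
+%%                    {<<"ha-sync-mode">>, <<"automatic">>}]) -> ok
+%%
+%%   validate_policy([{<<"ha-params">>, 2}]) ->
+%%       {error, "ha-mode must be specified to specify ha-params, "
+%%               "ha-sync-mode or ha-promote-on-shutdown", []}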
diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode.erl b/deps/rabbit/src/rabbit_mirror_queue_mode.erl
new file mode 100644
index 0000000000..91491efc49
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_mode.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_mode).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-type master() :: node().
+-type slave() :: node().
+-type params() :: any().
+
+-callback description() -> [proplists:property()].
+
+%% Called whenever we think we might need to change nodes for a
+%% mirrored queue. Note that this is called from a variety of
+%% contexts, both inside and outside Mnesia transactions. Ideally it
+%% will be pure-functional.
+%%
+%% Takes: parameters set in the policy,
+%% current master,
+%% current mirrors,
+%% current synchronised mirrors,
+%% all nodes to consider
+%%
+%% Returns: tuple of new master, new mirrors
+%%
+-callback suggested_queue_nodes(
+ params(), master(), [slave()], [slave()], [node()]) ->
+ {master(), [slave()]}.
+
+%% Are the parameters valid for this mode?
+-callback validate_policy(params()) ->
+ rabbit_policy_validator:validate_results().
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode_all.erl b/deps/rabbit/src/rabbit_mirror_queue_mode_all.erl
new file mode 100644
index 0000000000..2da12a5972
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_mode_all.erl
@@ -0,0 +1,32 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_mode_all).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_mirror_queue_mode).
+
+-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "mirror mode all"},
+ {mfa, {rabbit_registry, register,
+ [ha_mode, <<"all">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+description() ->
+ [{description, <<"Mirror queue to all nodes">>}].
+
+suggested_queue_nodes(_Params, MNode, _SNodes, _SSNodes, Poss) ->
+ {MNode, Poss -- [MNode]}.
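+
+%% Example (hypothetical nodes): with the master on node a and candidate
+%% nodes [a, b, c], this returns {a, [b, c]}: every other node gets a mirror.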
+
+validate_policy(none) ->
+ ok;
+validate_policy(_Params) ->
+ {error, "ha-mode=\"all\" does not take parameters", []}.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl b/deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl
new file mode 100644
index 0000000000..a8aa7546ac
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_mode_exactly).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_mirror_queue_mode).
+
+-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "mirror mode exactly"},
+ {mfa, {rabbit_registry, register,
+ [ha_mode, <<"exactly">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+description() ->
+ [{description, <<"Mirror queue to a specified number of nodes">>}].
+
+%% When we need to add nodes, we randomise our candidate list as a
+%% crude form of load-balancing. TODO it would also be nice to
+%% randomise the list of ones to remove when we have too many - we
+%% would have to take account of synchronisation though.
+suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) ->
+ SCount = Count - 1,
+ {MNode, case SCount > length(SNodes) of
+ true -> Cand = shuffle((Poss -- [MNode]) -- SNodes),
+ SNodes ++ lists:sublist(Cand, SCount - length(SNodes));
+ false -> lists:sublist(SNodes, SCount)
+ end}.
+
+shuffle(L) ->
+ {_, L1} = lists:unzip(lists:keysort(1, [{rand:uniform(), N} || N <- L])),
+ L1.
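+
+%% Example (hypothetical nodes): ha-params = 3, master on a, one existing
+%% mirror on b, candidates [a, b, c, d]. SCount = 2 > 1, so one extra node
+%% is picked at random from [c, d], giving {a, [b, c]} or {a, [b, d]}.
+%% With ha-params = 1 the same call returns {a, []}: existing mirrors are
+%% trimmed down to the requested count.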
+
+validate_policy(N) when is_integer(N) andalso N > 0 ->
+ ok;
+validate_policy(Params) ->
+ {error, "ha-mode=\"exactly\" takes an integer, ~p given", [Params]}.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl b/deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl
new file mode 100644
index 0000000000..f3e134ba63
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl
@@ -0,0 +1,69 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_mode_nodes).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_mirror_queue_mode).
+
+-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "mirror mode nodes"},
+ {mfa, {rabbit_registry, register,
+ [ha_mode, <<"nodes">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+description() ->
+ [{description, <<"Mirror queue to specified nodes">>}].
+
+suggested_queue_nodes(PolicyNodes0, CurrentMaster, _SNodes, SSNodes, NodesRunningRabbitMQ) ->
+ PolicyNodes1 = [list_to_atom(binary_to_list(Node)) || Node <- PolicyNodes0],
+ %% If the current master is not in the nodes specified, then what we want
+ %% to do depends on whether there are any synchronised mirrors. If there
+ %% are then we can just kill the current master - the admin has asked for
+ %% a migration and we should give it to them. If there are not however
+ %% then we must keep the master around so as not to lose messages.
+
+ PolicyNodes = case SSNodes of
+ [] -> lists:usort([CurrentMaster | PolicyNodes1]);
+ _ -> PolicyNodes1
+ end,
+ Unavailable = PolicyNodes -- NodesRunningRabbitMQ,
+ AvailablePolicyNodes = PolicyNodes -- Unavailable,
+ case AvailablePolicyNodes of
+ [] -> %% We have never heard of anything? Not much we can do but
+ %% keep the master alive.
+ {CurrentMaster, []};
+ _ -> case lists:member(CurrentMaster, AvailablePolicyNodes) of
+ true -> {CurrentMaster,
+ AvailablePolicyNodes -- [CurrentMaster]};
+ false -> %% Make sure the new master is synced! In order to
+ %% get here SSNodes must not be empty.
+ SyncPolicyNodes = [Node ||
+ Node <- AvailablePolicyNodes,
+ lists:member(Node, SSNodes)],
+ NewMaster = case SyncPolicyNodes of
+ [Node | _] -> Node;
+ [] -> erlang:hd(SSNodes)
+ end,
+ {NewMaster, AvailablePolicyNodes -- [NewMaster]}
+ end
+ end.
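+
+%% Example (hypothetical nodes): policy nodes [<<"a">>, <<"b">>], current
+%% master on c, no synchronised mirrors, nodes a, b and c all running.
+%% Because SSNodes is empty the master is kept, the effective policy set
+%% becomes [a, b, c] and the result is {c, [a, b]}. Once a mirror on a or b
+%% is synchronised, a later call can move the master onto a policy node.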
+
+validate_policy([]) ->
+ {error, "ha-mode=\"nodes\" list must be non-empty", []};
+validate_policy(Nodes) when is_list(Nodes) ->
+ case [I || I <- Nodes, not is_binary(I)] of
+ [] -> ok;
+ Invalid -> {error, "ha-mode=\"nodes\" takes a list of strings, "
+ "~p was not a string", [Invalid]}
+ end;
+validate_policy(Params) ->
+ {error, "ha-mode=\"nodes\" takes a list, ~p given", [Params]}.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_slave.erl b/deps/rabbit/src/rabbit_mirror_queue_slave.erl
new file mode 100644
index 0000000000..0480db9cfe
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_slave.erl
@@ -0,0 +1,1093 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_slave).
+
+%% For general documentation of HA design, see
+%% rabbit_mirror_queue_coordinator
+%%
+%% We receive messages from GM and from publishers, and the gm
+%% messages can arrive either before or after the 'actual' message.
+%% All instructions from the GM group must be processed in the order
+%% in which they're received.
+
+-export([set_maximum_since_use/2, info/1, go/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, handle_pre_hibernate/1, prioritise_call/4,
+ prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
+
+-export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
+
+-behaviour(gen_server2).
+-behaviour(gm).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-include("amqqueue.hrl").
+-include("gm_specs.hrl").
+
+%%----------------------------------------------------------------------------
+
+-define(INFO_KEYS,
+ [pid,
+ name,
+ master_pid,
+ is_synchronised
+ ]).
+
+-define(SYNC_INTERVAL, 25). %% milliseconds
+-define(RAM_DURATION_UPDATE_INTERVAL, 5000).
+-define(DEATH_TIMEOUT, 20000). %% 20 seconds
+
+-record(state, { q,
+ gm,
+ backing_queue,
+ backing_queue_state,
+ sync_timer_ref,
+ rate_timer_ref,
+
+ sender_queues, %% :: Pid -> {Q Msg, Set MsgId, ChState}
+ msg_id_ack, %% :: MsgId -> AckTag
+
+ msg_id_status,
+ known_senders,
+
+ %% Master depth - local depth
+ depth_delta
+ }).
+
+%%----------------------------------------------------------------------------
+
+set_maximum_since_use(QPid, Age) ->
+ gen_server2:cast(QPid, {set_maximum_since_use, Age}).
+
+info(QPid) -> gen_server2:call(QPid, info, infinity).
+
+init(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ ?store_proc_name(QName),
+ {ok, {not_started, Q}, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN,
+ ?DESIRED_HIBERNATE}, ?MODULE}.
+
+go(SPid, sync) -> gen_server2:call(SPid, go, infinity);
+go(SPid, async) -> gen_server2:cast(SPid, go).
+
+handle_go(Q0) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ %% We join the GM group before we add ourselves to the amqqueue
+ %% record. As a result:
+ %% 1. We can receive msgs from GM that correspond to messages we will
+ %% never receive from publishers.
+ %% 2. When we receive a message from publishers, we must receive a
+ %% message from the GM group for it.
+ %% 3. However, that instruction from the GM group can arrive either
+ %% before or after the actual message. We need to be able to
+ %% distinguish between GM instructions arriving early, and case (1)
+ %% above.
+ %%
+ process_flag(trap_exit, true), %% amqqueue_process traps exits too.
+ {ok, GM} = gm:start_link(QName, ?MODULE, [self()],
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ MRef = erlang:monitor(process, GM),
+ %% We ignore the DOWN message because we are also linked and
+ %% trapping exits; we just want to avoid getting stuck here, and we
+ %% will exit later anyway.
+ receive
+ {joined, GM} -> erlang:demonitor(MRef, [flush]),
+ ok;
+ {'DOWN', MRef, _, _, _} -> ok
+ end,
+ Self = self(),
+ Node = node(),
+ case rabbit_misc:execute_mnesia_transaction(
+ fun() -> init_it(Self, GM, Node, QName) end) of
+ {new, QPid, GMPids} ->
+ ok = file_handle_cache:register_callback(
+ rabbit_amqqueue, set_maximum_since_use, [Self]),
+ ok = rabbit_memory_monitor:register(
+ Self, {rabbit_amqqueue, set_ram_duration_target, [Self]}),
+ {ok, BQ} = application:get_env(backing_queue_module),
+ Q1 = amqqueue:set_pid(Q0, QPid),
+ _ = BQ:delete_crashed(Q1), %% For crash recovery
+ BQS = bq_init(BQ, Q1, new),
+ State = #state { q = Q1,
+ gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ rate_timer_ref = undefined,
+ sync_timer_ref = undefined,
+
+ sender_queues = #{},
+ msg_id_ack = #{},
+
+ msg_id_status = #{},
+ known_senders = pmon:new(delegate),
+
+ depth_delta = undefined
+ },
+ ok = gm:broadcast(GM, request_depth),
+ ok = gm:validate_members(GM, [GM | [G || {G, _} <- GMPids]]),
+ rabbit_mirror_queue_misc:maybe_auto_sync(Q1),
+ {ok, State};
+ {stale, StalePid} ->
+ rabbit_mirror_queue_misc:log_warning(
+ QName, "Detected stale HA master: ~p~n", [StalePid]),
+ gm:leave(GM),
+ {error, {stale_master_pid, StalePid}};
+ duplicate_live_master ->
+ gm:leave(GM),
+ {error, {duplicate_live_master, Node}};
+ existing ->
+ gm:leave(GM),
+ {error, normal};
+ master_in_recovery ->
+ gm:leave(GM),
+ %% The queue record vanished - we must have a master starting
+ %% concurrently with us. In that case we can safely decide to do
+ %% nothing here, and the master will start us in
+ %% master:init_with_existing_bq/3
+ {error, normal}
+ end.
+
+init_it(Self, GM, Node, QName) ->
+ case mnesia:read({rabbit_queue, QName}) of
+ [Q] when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ GMPids = amqqueue:get_gm_pids(Q),
+ PSPids = amqqueue:get_slave_pids_pending_shutdown(Q),
+ case [Pid || Pid <- [QPid | SPids], node(Pid) =:= Node] of
+ [] -> stop_pending_slaves(QName, PSPids),
+ add_slave(Q, Self, GM),
+ {new, QPid, GMPids};
+ [QPid] -> case rabbit_mnesia:is_process_alive(QPid) of
+ true -> duplicate_live_master;
+ false -> {stale, QPid}
+ end;
+ [SPid] -> case rabbit_mnesia:is_process_alive(SPid) of
+ true -> existing;
+ false -> GMPids1 = [T || T = {_, S} <- GMPids, S =/= SPid],
+ SPids1 = SPids -- [SPid],
+ Q1 = amqqueue:set_slave_pids(Q, SPids1),
+ Q2 = amqqueue:set_gm_pids(Q1, GMPids1),
+ add_slave(Q2, Self, GM),
+ {new, QPid, GMPids1}
+ end
+ end;
+ [] ->
+ master_in_recovery
+ end.
+
+%% Pending mirrors have been asked to stop by the master, but despite the node
+%% being up they did not answer within the expected timeout. Stop local mirrors now.
+stop_pending_slaves(QName, Pids) ->
+ [begin
+ rabbit_mirror_queue_misc:log_warning(
+ QName, "Detected a non-responsive classic queue mirror, stopping it: ~p~n", [Pid]),
+ case erlang:process_info(Pid, dictionary) of
+ undefined -> ok;
+ {dictionary, Dict} ->
+ Vhost = QName#resource.virtual_host,
+ {ok, AmqQSup} = rabbit_amqqueue_sup_sup:find_for_vhost(Vhost),
+ case proplists:get_value('$ancestors', Dict) of
+ [Sup, AmqQSup | _] ->
+ exit(Sup, kill),
+ exit(Pid, kill);
+ _ ->
+ ok
+ end
+ end
+ end || Pid <- Pids, node(Pid) =:= node(),
+ true =:= erlang:is_process_alive(Pid)].
+
+%% Add to the end, so they are in descending order of age, see
+%% rabbit_mirror_queue_misc:promote_slave/1
+add_slave(Q0, New, GM) when ?is_amqqueue(Q0) ->
+ SPids = amqqueue:get_slave_pids(Q0),
+ GMPids = amqqueue:get_gm_pids(Q0),
+ SPids1 = SPids ++ [New],
+ GMPids1 = [{GM, New} | GMPids],
+ Q1 = amqqueue:set_slave_pids(Q0, SPids1),
+ Q2 = amqqueue:set_gm_pids(Q1, GMPids1),
+ rabbit_mirror_queue_misc:store_updated_slaves(Q2).
+
+handle_call(go, _From, {not_started, Q} = NotStarted) ->
+ case handle_go(Q) of
+ {ok, State} -> {reply, ok, State};
+ {error, Error} -> {stop, Error, NotStarted}
+ end;
+
+handle_call({gm_deaths, DeadGMPids}, From,
+ State = #state{ gm = GM, q = Q,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ MPid = amqqueue:get_pid(Q),
+ Self = self(),
+ case rabbit_mirror_queue_misc:remove_from_queue(QName, Self, DeadGMPids) of
+ {error, not_found} ->
+ gen_server2:reply(From, ok),
+ {stop, normal, State};
+ {error, {not_synced, _SPids}} ->
+ BQ:delete_and_terminate({error, not_synced}, BQS),
+ {stop, normal, State#state{backing_queue_state = undefined}};
+ {ok, Pid, DeadPids, ExtraNodes} ->
+ rabbit_mirror_queue_misc:report_deaths(Self, false, QName,
+ DeadPids),
+ case Pid of
+ MPid ->
+ %% master hasn't changed
+ gen_server2:reply(From, ok),
+ rabbit_mirror_queue_misc:add_mirrors(
+ QName, ExtraNodes, async),
+ noreply(State);
+ Self ->
+ %% we've become master
+ QueueState = promote_me(From, State),
+ rabbit_mirror_queue_misc:add_mirrors(
+ QName, ExtraNodes, async),
+ {become, rabbit_amqqueue_process, QueueState, hibernate};
+ _ ->
+ %% master has changed to not us
+ gen_server2:reply(From, ok),
+ %% see rabbitmq-server#914;
+ %% It's not always guaranteed that we won't have ExtraNodes.
+ %% If the GM membership changes, the master can change to a node
+ %% other than us while ExtraNodes is non-empty, in which case we
+ %% attempt to add mirrors on those nodes.
+ case ExtraNodes of
+ [] -> void;
+ _ -> rabbit_mirror_queue_misc:add_mirrors(
+ QName, ExtraNodes, async)
+ end,
+ %% Since GM is by nature lazy we need to make sure
+ %% there is some traffic when a master dies, to
+ %% make sure all mirrors get informed of the
+ %% death. That is all process_death does: create
+ %% some traffic.
+ ok = gm:broadcast(GM, process_death),
+ Q1 = amqqueue:set_pid(Q, Pid),
+ State1 = State#state{q = Q1},
+ noreply(State1)
+ end
+ end;
+
+handle_call(info, _From, State) ->
+ reply(infos(?INFO_KEYS, State), State).
+
+handle_cast(go, {not_started, Q} = NotStarted) ->
+ case handle_go(Q) of
+ {ok, State} -> {noreply, State};
+ {error, Error} -> {stop, Error, NotStarted}
+ end;
+
+handle_cast({run_backing_queue, Mod, Fun}, State) ->
+ noreply(run_backing_queue(Mod, Fun, State));
+
+handle_cast({gm, Instruction}, State = #state{q = Q0}) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q1} when ?is_amqqueue(Q1) ->
+ SPids = amqqueue:get_slave_pids(Q1),
+ case lists:member(self(), SPids) of
+ true ->
+ handle_process_result(process_instruction(Instruction, State));
+ false ->
+ %% We are potentially a duplicated mirror caused by a partial
+ %% partition; stop, as a new mirror could start unaware of our presence.
+ {stop, shutdown, State}
+ end;
+ {error, not_found} ->
+ %% Would not expect this to happen after fixing #953
+ {stop, shutdown, State}
+ end;
+
+handle_cast({deliver, Delivery = #delivery{sender = Sender, flow = Flow}, true},
+ State) ->
+ %% Asynchronous, non-"mandatory", deliver mode.
+ %% We are acking messages to the channel process that sent us
+ %% the message delivery. See
+ %% rabbit_amqqueue_process:handle_ch_down for more info.
+ %% If the message is rejected by the master, the publish will be nacked
+ %% even if mirrors confirm it. No need to check for length here.
+ maybe_flow_ack(Sender, Flow),
+ noreply(maybe_enqueue_message(Delivery, State));
+
+handle_cast({sync_start, Ref, Syncer},
+ State = #state { depth_delta = DD,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State1 = #state{rate_timer_ref = TRef} = ensure_rate_timer(State),
+ S = fun({MA, TRefN, BQSN}) ->
+ State1#state{depth_delta = undefined,
+ msg_id_ack = maps:from_list(MA),
+ rate_timer_ref = TRefN,
+ backing_queue_state = BQSN}
+ end,
+ case rabbit_mirror_queue_sync:slave(
+ DD, Ref, TRef, Syncer, BQ, BQS,
+ fun (BQN, BQSN) ->
+ BQSN1 = update_ram_duration(BQN, BQSN),
+ TRefN = rabbit_misc:send_after(?RAM_DURATION_UPDATE_INTERVAL,
+ self(), update_ram_duration),
+ {TRefN, BQSN1}
+ end) of
+ denied -> noreply(State1);
+ {ok, Res} -> noreply(set_delta(0, S(Res)));
+ {failed, Res} -> noreply(S(Res));
+ {stop, Reason, Res} -> {stop, Reason, S(Res)}
+ end;
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ noreply(State);
+
+handle_cast({set_ram_duration_target, Duration},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQS1 = BQ:set_ram_duration_target(Duration, BQS),
+ noreply(State #state { backing_queue_state = BQS1 });
+
+handle_cast(policy_changed, State) ->
+ %% During partial partitions, we might end up receiving messages expected by a master.
+ %% Ignore them.
+ noreply(State).
+
+handle_info(update_ram_duration, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQS1 = update_ram_duration(BQ, BQS),
+ %% Don't call noreply/1, we don't want to set timers
+ {State1, Timeout} = next_state(State #state {
+ rate_timer_ref = undefined,
+ backing_queue_state = BQS1 }),
+ {noreply, State1, Timeout};
+
+handle_info(sync_timeout, State) ->
+ noreply(backing_queue_timeout(
+ State #state { sync_timer_ref = undefined }));
+
+handle_info(timeout, State) ->
+ noreply(backing_queue_timeout(State));
+
+handle_info({'DOWN', _MonitorRef, process, ChPid, _Reason}, State) ->
+ local_sender_death(ChPid, State),
+ noreply(maybe_forget_sender(ChPid, down_from_ch, State));
+
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State};
+
+handle_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ noreply(State);
+
+handle_info(bump_reduce_memory_use, State) ->
+ noreply(State);
+
+%% In the event of a short partition during sync we can detect the
+%% master's 'death', drop out of sync, and then receive sync messages
+%% which were still in flight. Ignore them.
+handle_info({sync_msg, _Ref, _Msg, _Props, _Unacked}, State) ->
+ noreply(State);
+
+handle_info({sync_complete, _Ref}, State) ->
+ noreply(State);
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+terminate(_Reason, {not_started, _Q}) ->
+ ok;
+terminate(_Reason, #state { backing_queue_state = undefined }) ->
+ %% We've received a delete_and_terminate from gm, thus nothing to
+ %% do here.
+ ok;
+terminate({shutdown, dropped} = R, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ %% See rabbit_mirror_queue_master:terminate/2
+ terminate_common(State),
+ BQ:delete_and_terminate(R, BQS);
+terminate(shutdown, State) ->
+ terminate_shutdown(shutdown, State);
+terminate({shutdown, _} = R, State) ->
+ terminate_shutdown(R, State);
+terminate(Reason, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ terminate_common(State),
+ BQ:delete_and_terminate(Reason, BQS).
+
+%% If the Reason is shutdown, or {shutdown, _}, it is not the queue
+%% being deleted: it's just the node going down. Even though we're a
+%% mirror, we have no idea whether or not we'll be the only copy coming
+%% back up. Thus we must assume we will be, and preserve anything we
+%% have on disk.
+terminate_shutdown(Reason, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ terminate_common(State),
+ BQ:terminate(Reason, BQS).
+
+terminate_common(State) ->
+ ok = rabbit_memory_monitor:deregister(self()),
+ stop_rate_timer(stop_sync_timer(State)).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_pre_hibernate({not_started, _Q} = State) ->
+ {hibernate, State};
+
+handle_pre_hibernate(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
+ BQS3 = BQ:handle_pre_hibernate(BQS2),
+ {hibernate, stop_rate_timer(State #state { backing_queue_state = BQS3 })}.
+
+prioritise_call(Msg, _From, _Len, _State) ->
+ case Msg of
+ info -> 9;
+ {gm_deaths, _Dead} -> 5;
+ _ -> 0
+ end.
+
+prioritise_cast(Msg, _Len, _State) ->
+ case Msg of
+ {set_ram_duration_target, _Duration} -> 8;
+ {set_maximum_since_use, _Age} -> 8;
+ {run_backing_queue, _Mod, _Fun} -> 6;
+ {gm, _Msg} -> 5;
+ _ -> 0
+ end.
+
+prioritise_info(Msg, _Len, _State) ->
+ case Msg of
+ update_ram_duration -> 8;
+ sync_timeout -> 6;
+ _ -> 0
+ end.
+
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+%% ---------------------------------------------------------------------------
+%% GM
+%% ---------------------------------------------------------------------------
+
+joined([SPid], _Members) -> SPid ! {joined, self()}, ok.
+
+members_changed([_SPid], _Births, []) ->
+ ok;
+members_changed([ SPid], _Births, Deaths) ->
+ case rabbit_misc:with_exit_handler(
+ rabbit_misc:const(ok),
+ fun() ->
+ gen_server2:call(SPid, {gm_deaths, Deaths}, infinity)
+ end) of
+ ok -> ok;
+ {promote, CPid} -> {become, rabbit_mirror_queue_coordinator, [CPid]}
+ end.
+
+handle_msg([_SPid], _From, hibernate_heartbeat) ->
+ %% See rabbit_mirror_queue_coordinator:handle_pre_hibernate/1
+ ok;
+handle_msg([_SPid], _From, request_depth) ->
+ %% This is only of value to the master
+ ok;
+handle_msg([_SPid], _From, {ensure_monitoring, _Pid}) ->
+ %% This is only of value to the master
+ ok;
+handle_msg([_SPid], _From, process_death) ->
+ %% We must not take any notice of the master death here since it
+ %% comes without ordering guarantees - there could still be
+ %% messages from the master we have yet to receive. When we get
+ %% members_changed, then there will be no more messages.
+ ok;
+handle_msg([CPid], _From, {delete_and_terminate, _Reason} = Msg) ->
+ ok = gen_server2:cast(CPid, {gm, Msg}),
+ {stop, {shutdown, ring_shutdown}};
+handle_msg([SPid], _From, {sync_start, Ref, Syncer, SPids}) ->
+ case lists:member(SPid, SPids) of
+ true -> gen_server2:cast(SPid, {sync_start, Ref, Syncer});
+ false -> ok
+ end;
+handle_msg([SPid], _From, Msg) ->
+ ok = gen_server2:cast(SPid, {gm, Msg}).
+
+handle_terminate([_SPid], _Reason) ->
+ ok.
+
+%% ---------------------------------------------------------------------------
+%% Others
+%% ---------------------------------------------------------------------------
+
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+i(pid, _State) ->
+ self();
+i(name, #state{q = Q}) when ?is_amqqueue(Q) ->
+ amqqueue:get_name(Q);
+i(master_pid, #state{q = Q}) when ?is_amqqueue(Q) ->
+ amqqueue:get_pid(Q);
+i(is_synchronised, #state{depth_delta = DD}) ->
+ DD =:= 0;
+i(_, _) ->
+ ''.
+
+bq_init(BQ, Q, Recover) ->
+ Self = self(),
+ BQ:init(Q, Recover,
+ fun (Mod, Fun) ->
+ rabbit_amqqueue:run_backing_queue(Self, Mod, Fun)
+ end).
+
+run_backing_queue(rabbit_mirror_queue_master, Fun, State) ->
+ %% Yes, this might look a little crazy, but see comments in
+ %% confirm_sender_death/1
+ Fun(?MODULE, State);
+run_backing_queue(Mod, Fun, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }.
+
+%% This feature was used by `rabbit_amqqueue_process` and
+%% `rabbit_mirror_queue_slave` up to and including RabbitMQ 3.7.x. It is
+%% unused in 3.8.x and thus deprecated. We keep it to support in-place
+%% upgrades to 3.8.x (i.e. mixed-version clusters), but it is a no-op
+%% starting with that version.
+send_mandatory(#delivery{mandatory = false}) ->
+ ok;
+send_mandatory(#delivery{mandatory = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo}) ->
+ gen_server2:cast(SenderPid, {mandatory_received, MsgSeqNo}).
+
+send_or_record_confirm(_, #delivery{ confirm = false }, MS, _State) ->
+ MS;
+send_or_record_confirm(published, #delivery { sender = ChPid,
+ confirm = true,
+ msg_seq_no = MsgSeqNo,
+ message = #basic_message {
+ id = MsgId,
+ is_persistent = true } },
+ MS, #state{q = Q}) when ?amqqueue_is_durable(Q) ->
+ maps:put(MsgId, {published, ChPid, MsgSeqNo} , MS);
+send_or_record_confirm(_Status, #delivery { sender = ChPid,
+ confirm = true,
+ msg_seq_no = MsgSeqNo },
+ MS, #state{q = Q} = _State) ->
+ ok = rabbit_classic_queue:confirm_to_sender(ChPid,
+ amqqueue:get_name(Q), [MsgSeqNo]),
+ MS.
+
+confirm_messages(MsgIds, State = #state{q = Q, msg_id_status = MS}) ->
+ QName = amqqueue:get_name(Q),
+ {CMs, MS1} =
+ lists:foldl(
+ fun (MsgId, {CMsN, MSN} = Acc) ->
+ %% We will never see 'discarded' here
+ case maps:find(MsgId, MSN) of
+ error ->
+ %% If it needed confirming, it'll have
+ %% already been done.
+ Acc;
+ {ok, published} ->
+ %% Still not seen it from the channel, just
+ %% record that it's been confirmed.
+ {CMsN, maps:put(MsgId, confirmed, MSN)};
+ {ok, {published, ChPid, MsgSeqNo}} ->
+ %% Seen from both GM and Channel. Can now
+ %% confirm.
+ {rabbit_misc:gb_trees_cons(ChPid, MsgSeqNo, CMsN),
+ maps:remove(MsgId, MSN)};
+ {ok, confirmed} ->
+ %% It's already been confirmed. This is
+ %% probably because it's been both sync'd to disk
+ %% and then delivered and ack'd before we've
+ %% seen the publish from the
+ %% channel. Nothing to do here.
+ Acc
+ end
+ end, {gb_trees:empty(), MS}, MsgIds),
+ Fun = fun (Pid, MsgSeqNos) ->
+ rabbit_classic_queue:confirm_to_sender(Pid, QName, MsgSeqNos)
+ end,
+ rabbit_misc:gb_trees_foreach(Fun, CMs),
+ State #state { msg_id_status = MS1 }.
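+
+%% Illustrative sketch of the assumed confirm flow for a confirm-mode,
+%% persistent message on a durable queue (not part of the original module):
+%% if the GM instruction arrives first, publish_or_discard/4 records
+%% 'published' for the MsgId; when the channel's delivery then arrives,
+%% maybe_enqueue_message/2 upgrades the entry to {published, ChPid,
+%% MsgSeqNo}; once the backing queue reports the MsgId via drain_confirmed,
+%% confirm_messages/2 removes the entry and sends the confirm to ChPid. If
+%% drain_confirmed reports the MsgId while the entry is still 'published',
+%% it becomes 'confirmed' and the confirm is sent as soon as the channel's
+%% delivery shows up.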
+
+handle_process_result({ok, State}) -> noreply(State);
+handle_process_result({stop, State}) -> {stop, normal, State}.
+
+-spec promote_me({pid(), term()}, #state{}) -> no_return().
+
+promote_me(From, #state { q = Q0,
+ gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ rate_timer_ref = RateTRef,
+ sender_queues = SQ,
+ msg_id_ack = MA,
+ msg_id_status = MS,
+ known_senders = KS}) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ rabbit_mirror_queue_misc:log_info(QName, "Promoting mirror ~s to master~n",
+ [rabbit_misc:pid_to_string(self())]),
+ Q1 = amqqueue:set_pid(Q0, self()),
+ DeathFun = rabbit_mirror_queue_master:sender_death_fun(),
+ DepthFun = rabbit_mirror_queue_master:depth_fun(),
+ {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q1, GM, DeathFun, DepthFun),
+ true = unlink(GM),
+ gen_server2:reply(From, {promote, CPid}),
+
+ %% Everything that we're monitoring, we need to ensure our new
+ %% coordinator is monitoring.
+ MPids = pmon:monitored(KS),
+ ok = rabbit_mirror_queue_coordinator:ensure_monitoring(CPid, MPids),
+
+ %% We find all the messages that we've received from channels but
+ %% not from gm, and pass them to the
+ %% queue_process:init_with_backing_queue_state to be enqueued.
+ %%
+ %% We also have to requeue messages which are pending acks: the
+ %% consumers from the master queue have been lost and so these
+ %% messages need requeuing. They might also be pending
+ %% confirmation, and indeed they might also be pending arrival of
+ %% the publication from the channel itself, if we received both
+ %% the publication and the fetch via gm first! Requeuing doesn't
+ %% affect confirmations: if the message was previously pending a
+ %% confirmation then it still will be, under the same msg_id. So
+ %% as a master, we need to be prepared to filter out the
+ %% publication of said messages from the channel (is_duplicate
+ %% (thus such requeued messages must remain in the msg_id_status
+ %% (MS) which becomes seen_status (SS) in the master)).
+ %%
+ %% Then there are messages we already have in the queue, which are
+ %% not currently pending acknowledgement:
+ %% 1. Messages we've only received via gm:
+ %% Filter out subsequent publication from channel through
+ %% validate_message. Might have to issue confirms then or
+ %% later, thus queue_process state will have to know that
+ %% there's a pending confirm.
+ %% 2. Messages received via both gm and channel:
+ %% Queue will have to deal with issuing confirms if necessary.
+ %%
+ %% MS contains the following four entry types:
+ %%
+ %% a) published:
+ %% published via gm only; pending arrival of publication from
+ %% channel, maybe pending confirm.
+ %%
+ %% b) {published, ChPid, MsgSeqNo}:
+ %% published via gm and channel; pending confirm.
+ %%
+ %% c) confirmed:
+ %% published via gm only, and confirmed; pending publication
+ %% from channel.
+ %%
+ %% d) discarded:
+ %% seen via gm only as discarded. Pending publication from
+ %% channel.
+ %%
+ %% Only the forms a, c and d need to go to the master state
+ %% seen_status (SS).
+ %%
+ %% Only the form b needs to go through to the queue_process
+ %% state to form the msg_id_to_channel mapping (MTC).
+ %%
+ %% No messages that are enqueued from SQ at this point will have
+ %% entries in MS.
+ %%
+ %% Messages that are extracted from MA may have entries in MS, and
+ %% those messages are then requeued. However, as discussed above,
+ %% this does not affect MS, nor which bits go through to SS in
+ %% Master, or MTC in queue_process.
+
+ St = [published, confirmed, discarded],
+ SS = maps:filter(fun (_MsgId, Status) -> lists:member(Status, St) end, MS),
+ AckTags = [AckTag || {_MsgId, AckTag} <- maps:to_list(MA)],
+
+ MasterState = rabbit_mirror_queue_master:promote_backing_queue_state(
+ QName, CPid, BQ, BQS, GM, AckTags, SS, MPids),
+
+ MTC = maps:fold(fun (MsgId, {published, ChPid, MsgSeqNo}, MTC0) ->
+ maps:put(MsgId, {ChPid, MsgSeqNo}, MTC0);
+ (_Msgid, _Status, MTC0) ->
+ MTC0
+ end, #{}, MS),
+ Deliveries = [promote_delivery(Delivery) ||
+ {_ChPid, {PubQ, _PendCh, _ChState}} <- maps:to_list(SQ),
+ Delivery <- queue:to_list(PubQ)],
+ AwaitGmDown = [ChPid || {ChPid, {_, _, down_from_ch}} <- maps:to_list(SQ)],
+ KS1 = lists:foldl(fun (ChPid0, KS0) ->
+ pmon:demonitor(ChPid0, KS0)
+ end, KS, AwaitGmDown),
+ rabbit_misc:store_proc_name(rabbit_amqqueue_process, QName),
+ rabbit_amqqueue_process:init_with_backing_queue_state(
+ Q1, rabbit_mirror_queue_master, MasterState, RateTRef, Deliveries, KS1,
+ MTC).
+
+%% We reset mandatory to false here because we will already have sent
+%% mandatory_received as soon as we got the message. We also need to
+%% send an ack for these messages since the channel is waiting for one
+%% for the via-GM case and will not now receive one.
+promote_delivery(Delivery = #delivery{sender = Sender, flow = Flow}) ->
+ maybe_flow_ack(Sender, Flow),
+ Delivery#delivery{mandatory = false}.
+
+noreply(State) ->
+ {NewState, Timeout} = next_state(State),
+ {noreply, ensure_rate_timer(NewState), Timeout}.
+
+reply(Reply, State) ->
+ {NewState, Timeout} = next_state(State),
+ {reply, Reply, ensure_rate_timer(NewState), Timeout}.
+
+next_state(State = #state{backing_queue = BQ, backing_queue_state = BQS}) ->
+ {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
+ State1 = confirm_messages(MsgIds,
+ State #state { backing_queue_state = BQS1 }),
+ case BQ:needs_timeout(BQS1) of
+ false -> {stop_sync_timer(State1), hibernate };
+ idle -> {stop_sync_timer(State1), ?SYNC_INTERVAL};
+ timed -> {ensure_sync_timer(State1), 0 }
+ end.
+
+backing_queue_timeout(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State#state{backing_queue_state = BQ:timeout(BQS)}.
+
+ensure_sync_timer(State) ->
+ rabbit_misc:ensure_timer(State, #state.sync_timer_ref,
+ ?SYNC_INTERVAL, sync_timeout).
+
+stop_sync_timer(State) -> rabbit_misc:stop_timer(State, #state.sync_timer_ref).
+
+ensure_rate_timer(State) ->
+ rabbit_misc:ensure_timer(State, #state.rate_timer_ref,
+ ?RAM_DURATION_UPDATE_INTERVAL,
+ update_ram_duration).
+
+stop_rate_timer(State) -> rabbit_misc:stop_timer(State, #state.rate_timer_ref).
+
+ensure_monitoring(ChPid, State = #state { known_senders = KS }) ->
+ State #state { known_senders = pmon:monitor(ChPid, KS) }.
+
+local_sender_death(ChPid, #state { known_senders = KS }) ->
+ %% The channel will be monitored iff we have received a delivery
+ %% from it but not heard about its death from the master. So if it
+ %% is monitored we need to point the death out to the master (see
+ %% essay).
+ ok = case pmon:is_monitored(ChPid, KS) of
+ false -> ok;
+ true -> confirm_sender_death(ChPid)
+ end.
+
+confirm_sender_death(Pid) ->
+ %% We have to deal with the possibility that we'll be promoted to
+ %% master before this fun gets run. Consequently we set the
+ %% module to rabbit_mirror_queue_master so that if we do become a
+ %% rabbit_amqqueue_process before then, sane things will happen.
+ Fun =
+ fun (?MODULE, State = #state { known_senders = KS,
+ gm = GM }) ->
+ %% We're running still as a mirror
+ %%
+ %% See comment in local_sender_death/2; we might have
+ %% received a sender_death in the meanwhile so check
+ %% again.
+ ok = case pmon:is_monitored(Pid, KS) of
+ false -> ok;
+ true -> gm:broadcast(GM, {ensure_monitoring, [Pid]}),
+ confirm_sender_death(Pid)
+ end,
+ State;
+ (rabbit_mirror_queue_master, State) ->
+ %% We've become the master. State is now opaque to
+ %% us. When we became master, if Pid was still known
+ %% to us we would already have set up monitoring of
+ %% it, so this is now a no-op.
+ State
+ end,
+ %% Note that we do not remove our knowledge of this ChPid until we
+ %% get the sender_death from GM as well as a DOWN notification.
+ {ok, _TRef} = timer:apply_after(
+ ?DEATH_TIMEOUT, rabbit_amqqueue, run_backing_queue,
+ [self(), rabbit_mirror_queue_master, Fun]),
+ ok.
+
+forget_sender(_, running) -> false;
+forget_sender(down_from_gm, down_from_gm) -> false; %% [1]
+forget_sender(down_from_ch, down_from_ch) -> false;
+forget_sender(Down1, Down2) when Down1 =/= Down2 -> true.
+
+%% [1] If another mirror goes through confirm_sender_death/1 before we
+%% do we can get two GM sender_death messages in a row for the same
+%% channel - don't treat that as anything special.
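+
+%% Illustrative sketch (assumed lifecycle, not part of the original module):
+%% a sender channel starts as 'running'. If its 'DOWN' arrives first we
+%% record down_from_ch; if the GM sender_death arrives first we record
+%% down_from_gm. forget_sender/2 only returns true once both kinds of down
+%% notification have been seen, e.g.
+%% forget_sender(down_from_gm, down_from_ch) -> true.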
+
+%% Record and process lifetime events from channels. Forget all about a channel
+%% only when down notifications are received from both the channel and from gm.
+maybe_forget_sender(ChPid, ChState, State = #state { sender_queues = SQ,
+ msg_id_status = MS,
+ known_senders = KS }) ->
+ case maps:find(ChPid, SQ) of
+ error ->
+ State;
+ {ok, {MQ, PendCh, ChStateRecord}} ->
+ case forget_sender(ChState, ChStateRecord) of
+ true ->
+ credit_flow:peer_down(ChPid),
+ State #state { sender_queues = maps:remove(ChPid, SQ),
+ msg_id_status = lists:foldl(
+ fun maps:remove/2,
+ MS, sets:to_list(PendCh)),
+ known_senders = pmon:demonitor(ChPid, KS) };
+ false ->
+ SQ1 = maps:put(ChPid, {MQ, PendCh, ChState}, SQ),
+ State #state { sender_queues = SQ1 }
+ end
+ end.
+
+maybe_enqueue_message(
+ Delivery = #delivery { message = #basic_message { id = MsgId },
+ sender = ChPid },
+ State = #state { sender_queues = SQ, msg_id_status = MS }) ->
+ send_mandatory(Delivery), %% must do this before confirms
+ State1 = ensure_monitoring(ChPid, State),
+ %% We will never see {published, ChPid, MsgSeqNo} here.
+ case maps:find(MsgId, MS) of
+ error ->
+ {MQ, PendingCh, ChState} = get_sender_queue(ChPid, SQ),
+ MQ1 = queue:in(Delivery, MQ),
+ SQ1 = maps:put(ChPid, {MQ1, PendingCh, ChState}, SQ),
+ State1 #state { sender_queues = SQ1 };
+ {ok, Status} ->
+ MS1 = send_or_record_confirm(
+ Status, Delivery, maps:remove(MsgId, MS), State1),
+ SQ1 = remove_from_pending_ch(MsgId, ChPid, SQ),
+ State1 #state { msg_id_status = MS1,
+ sender_queues = SQ1 }
+ end.
+
+get_sender_queue(ChPid, SQ) ->
+ case maps:find(ChPid, SQ) of
+ error -> {queue:new(), sets:new(), running};
+ {ok, Val} -> Val
+ end.
+
+remove_from_pending_ch(MsgId, ChPid, SQ) ->
+ case maps:find(ChPid, SQ) of
+ error ->
+ SQ;
+ {ok, {MQ, PendingCh, ChState}} ->
+ maps:put(ChPid, {MQ, sets:del_element(MsgId, PendingCh), ChState},
+ SQ)
+ end.
+
+publish_or_discard(Status, ChPid, MsgId,
+ State = #state { sender_queues = SQ, msg_id_status = MS }) ->
+ %% We really are going to do the publish/discard right now, even
+ %% though we may not have seen it directly from the channel. But
+ %% we cannot issue confirms until the latter has happened. So we
+ %% need to keep track of the MsgId and its confirmation status in
+ %% the meantime.
+ State1 = ensure_monitoring(ChPid, State),
+ {MQ, PendingCh, ChState} = get_sender_queue(ChPid, SQ),
+ {MQ1, PendingCh1, MS1} =
+ case queue:out(MQ) of
+ {empty, _MQ2} ->
+ {MQ, sets:add_element(MsgId, PendingCh),
+ maps:put(MsgId, Status, MS)};
+ {{value, Delivery = #delivery {
+ message = #basic_message { id = MsgId } }}, MQ2} ->
+ {MQ2, PendingCh,
+ %% We received the msg from the channel first. Thus
+ %% we need to deal with confirms here.
+ send_or_record_confirm(Status, Delivery, MS, State1)};
+ {{value, #delivery {}}, _MQ2} ->
+ %% The instruction was sent to us before we were
+ %% listed in the slave_pids of the #amqqueue{}
+ %% record. We'll never receive the message directly
+ %% from the channel, and the channel will not be
+ %% expecting any confirms from us.
+ {MQ, PendingCh, MS}
+ end,
+ SQ1 = maps:put(ChPid, {MQ1, PendingCh1, ChState}, SQ),
+ State1 #state { sender_queues = SQ1, msg_id_status = MS1 }.
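+
+%% Illustrative sketch (assumed cases, not part of the original module):
+%% for an incoming GM instruction about MsgId, queue:out(MQ) distinguishes
+%% three situations: the sender queue is empty (GM got here first, so the
+%% MsgId is parked in PendingCh/MS until the channel delivery arrives); the
+%% head of the queue is the same MsgId (the channel got here first, so the
+%% confirm can be handled now); or the head is a different message (we
+%% joined after the channel published it, so no confirm is ever expected).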
+
+
+process_instruction({publish, ChPid, Flow, MsgProps,
+ Msg = #basic_message { id = MsgId }}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+ publish_or_discard(published, ChPid, MsgId, State),
+ BQS1 = BQ:publish(Msg, MsgProps, true, ChPid, Flow, BQS),
+ {ok, State1 #state { backing_queue_state = BQS1 }};
+process_instruction({batch_publish, ChPid, Flow, Publishes}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+ lists:foldl(fun ({#basic_message { id = MsgId },
+ _MsgProps, _IsDelivered}, St) ->
+ publish_or_discard(published, ChPid, MsgId, St)
+ end, State, Publishes),
+ BQS1 = BQ:batch_publish(Publishes, ChPid, Flow, BQS),
+ {ok, State1 #state { backing_queue_state = BQS1 }};
+process_instruction({publish_delivered, ChPid, Flow, MsgProps,
+ Msg = #basic_message { id = MsgId }}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+ publish_or_discard(published, ChPid, MsgId, State),
+ true = BQ:is_empty(BQS),
+ {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, Flow, BQS),
+ {ok, maybe_store_ack(true, MsgId, AckTag,
+ State1 #state { backing_queue_state = BQS1 })};
+process_instruction({batch_publish_delivered, ChPid, Flow, Publishes}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ {MsgIds,
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS }} =
+ lists:foldl(fun ({#basic_message { id = MsgId }, _MsgProps},
+ {MsgIds, St}) ->
+ {[MsgId | MsgIds],
+ publish_or_discard(published, ChPid, MsgId, St)}
+ end, {[], State}, Publishes),
+ true = BQ:is_empty(BQS),
+ {AckTags, BQS1} = BQ:batch_publish_delivered(Publishes, ChPid, Flow, BQS),
+ MsgIdsAndAcks = lists:zip(lists:reverse(MsgIds), AckTags),
+ State2 = lists:foldl(
+ fun ({MsgId, AckTag}, St) ->
+ maybe_store_ack(true, MsgId, AckTag, St)
+ end, State1 #state { backing_queue_state = BQS1 },
+ MsgIdsAndAcks),
+ {ok, State2};
+process_instruction({discard, ChPid, Flow, MsgId}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+ publish_or_discard(discarded, ChPid, MsgId, State),
+ BQS1 = BQ:discard(MsgId, ChPid, Flow, BQS),
+ {ok, State1 #state { backing_queue_state = BQS1 }};
+process_instruction({drop, Length, Dropped, AckRequired},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ QLen = BQ:len(BQS),
+ ToDrop = case QLen - Length of
+ N when N > 0 -> N;
+ _ -> 0
+ end,
+ State1 = lists:foldl(
+ fun (const, StateN = #state{backing_queue_state = BQSN}) ->
+ {{MsgId, AckTag}, BQSN1} = BQ:drop(AckRequired, BQSN),
+ maybe_store_ack(
+ AckRequired, MsgId, AckTag,
+ StateN #state { backing_queue_state = BQSN1 })
+ end, State, lists:duplicate(ToDrop, const)),
+ {ok, case AckRequired of
+ true -> State1;
+ false -> update_delta(ToDrop - Dropped, State1)
+ end};
+process_instruction({ack, MsgIds},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_ack = MA }) ->
+ {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA),
+ {MsgIds1, BQS1} = BQ:ack(AckTags, BQS),
+ [] = MsgIds1 -- MsgIds, %% ASSERTION
+ {ok, update_delta(length(MsgIds1) - length(MsgIds),
+ State #state { msg_id_ack = MA1,
+ backing_queue_state = BQS1 })};
+process_instruction({requeue, MsgIds},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_ack = MA }) ->
+ {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA),
+ {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ {ok, State #state { msg_id_ack = MA1,
+ backing_queue_state = BQS1 }};
+process_instruction({sender_death, ChPid},
+ State = #state { known_senders = KS }) ->
+ %% The channel will be monitored iff we have received a message
+ %% from it. In this case we just want to avoid doing work if we
+ %% never got any messages.
+ {ok, case pmon:is_monitored(ChPid, KS) of
+ false -> State;
+ true -> maybe_forget_sender(ChPid, down_from_gm, State)
+ end};
+process_instruction({depth, Depth},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {ok, set_delta(Depth - BQ:depth(BQS), State)};
+
+process_instruction({delete_and_terminate, Reason},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQ:delete_and_terminate(Reason, BQS),
+ {stop, State #state { backing_queue_state = undefined }};
+process_instruction({set_queue_mode, Mode},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQS1 = BQ:set_queue_mode(Mode, BQS),
+ {ok, State #state { backing_queue_state = BQS1 }}.
+
+maybe_flow_ack(Sender, flow) -> credit_flow:ack(Sender);
+maybe_flow_ack(_Sender, noflow) -> ok.
+
+msg_ids_to_acktags(MsgIds, MA) ->
+ {AckTags, MA1} =
+ lists:foldl(
+ fun (MsgId, {Acc, MAN}) ->
+ case maps:find(MsgId, MA) of
+ error -> {Acc, MAN};
+ {ok, AckTag} -> {[AckTag | Acc], maps:remove(MsgId, MAN)}
+ end
+ end, {[], MA}, MsgIds),
+ {lists:reverse(AckTags), MA1}.
+
+maybe_store_ack(false, _MsgId, _AckTag, State) ->
+ State;
+maybe_store_ack(true, MsgId, AckTag, State = #state { msg_id_ack = MA }) ->
+ State #state { msg_id_ack = maps:put(MsgId, AckTag, MA) }.
+
+set_delta(0, State = #state { depth_delta = undefined }) ->
+ ok = record_synchronised(State#state.q),
+ State #state { depth_delta = 0 };
+set_delta(NewDelta, State = #state { depth_delta = undefined }) ->
+ true = NewDelta > 0, %% assertion
+ State #state { depth_delta = NewDelta };
+set_delta(NewDelta, State = #state { depth_delta = Delta }) ->
+ update_delta(NewDelta - Delta, State).
+
+update_delta(_DeltaChange, State = #state { depth_delta = undefined }) ->
+ State;
+update_delta( DeltaChange, State = #state { depth_delta = 0 }) ->
+ 0 = DeltaChange, %% assertion: we cannot become unsync'ed
+ State;
+update_delta( DeltaChange, State = #state { depth_delta = Delta }) ->
+ true = DeltaChange =< 0, %% assertion: we cannot become 'less' sync'ed
+ set_delta(Delta + DeltaChange, State #state { depth_delta = undefined }).
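+
+%% Illustrative sketch (hypothetical numbers, not part of the original
+%% module): depth_delta is "master depth - local depth". A fresh mirror has
+%% depth_delta = undefined; if the first {depth, Depth} instruction carries
+%% Depth = 10 while our local BQ:depth/1 is 4, set_delta(6, State) records 6.
+%% Acks and drops for messages the mirror never held shrink the delta, and
+%% once it reaches 0, record_synchronised/1 marks this mirror as synced.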
+
+update_ram_duration(BQ, BQS) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQ:set_ram_duration_target(DesiredDuration, BQS1).
+
+record_synchronised(Q0) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ Self = self(),
+ F = fun () ->
+ case mnesia:read({rabbit_queue, QName}) of
+ [] ->
+ ok;
+ [Q1] when ?is_amqqueue(Q1) ->
+ SSPids = amqqueue:get_sync_slave_pids(Q1),
+ SSPids1 = [Self | SSPids],
+ Q2 = amqqueue:set_sync_slave_pids(Q1, SSPids1),
+ rabbit_mirror_queue_misc:store_updated_slaves(Q2),
+ {ok, Q2}
+ end
+ end,
+ case rabbit_misc:execute_mnesia_transaction(F) of
+ ok -> ok;
+ {ok, Q2} -> rabbit_mirror_queue_misc:maybe_drop_master_after_sync(Q2)
+ end.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_sync.erl b/deps/rabbit/src/rabbit_mirror_queue_sync.erl
new file mode 100644
index 0000000000..a82ee05599
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_sync.erl
@@ -0,0 +1,420 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_sync).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([master_prepare/4, master_go/8, slave/7, conserve_resources/3]).
+
+-define(SYNC_PROGRESS_INTERVAL, 1000000).
+
+%% There are three processes involved: the master, the syncer and the
+%% slave(s). The syncer is an intermediary, linked to the master in
+%% order to make sure we do not mess with the master's credit flow or
+%% set of monitors.
+%%
+%% Interactions
+%% ------------
+%%
+%% '*' indicates repeating messages. All are standard Erlang messages
+%% except sync_start which is sent over GM to flush out any other
+%% messages that we might have sent that way already. (credit) is the
+%% usual credit_flow bump message every so often.
+%%
+%% Master Syncer Slave(s)
+%% sync_mirrors -> || ||
+%% || -- (spawns) --> || ||
+%% || --------- sync_start (over GM) -------> ||
+%% || || <--- sync_ready ---- ||
+%% || || (or) ||
+%% || || <--- sync_deny ----- ||
+%% || <--- ready ---- || ||
+%% || <--- next* ---- || || }
+%% || ---- msg* ----> || || } loop
+%% || || ---- sync_msgs* ---> || }
+%% || || <--- (credit)* ----- || }
+%% || <--- next ---- || ||
+%% || ---- done ----> || ||
+%% || || -- sync_complete --> ||
+%% || (Dies) ||
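+%%
+%% A mirror replies sync_deny when it believes it is already fully synced
+%% (its depth_delta is 0, see slave/7 below); the syncer simply leaves such
+%% mirrors out of the rest of the sync.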
+
+-type log_fun() :: fun ((string(), [any()]) -> 'ok').
+-type bq() :: atom().
+-type bqs() :: any().
+-type ack() :: any().
+-type slave_sync_state() :: {[{rabbit_types:msg_id(), ack()}], timer:tref(),
+ bqs()}.
+
+%% ---------------------------------------------------------------------------
+%% Master
+
+-spec master_prepare(reference(), rabbit_amqqueue:name(),
+ log_fun(), [pid()]) -> pid().
+
+master_prepare(Ref, QName, Log, SPids) ->
+ MPid = self(),
+ spawn_link(fun () ->
+ ?store_proc_name(QName),
+ syncer(Ref, Log, MPid, SPids)
+ end).
+
+-spec master_go(pid(), reference(), log_fun(),
+ rabbit_mirror_queue_master:stats_fun(),
+ rabbit_mirror_queue_master:stats_fun(),
+ non_neg_integer(),
+ bq(), bqs()) ->
+ {'already_synced', bqs()} | {'ok', bqs()} |
+ {'cancelled', bqs()} |
+ {'shutdown', any(), bqs()} |
+ {'sync_died', any(), bqs()}.
+
+master_go(Syncer, Ref, Log, HandleInfo, EmitStats, SyncBatchSize, BQ, BQS) ->
+ Args = {Syncer, Ref, Log, HandleInfo, EmitStats, rabbit_misc:get_parent()},
+ receive
+ {'EXIT', Syncer, normal} -> {already_synced, BQS};
+ {'EXIT', Syncer, Reason} -> {sync_died, Reason, BQS};
+ {ready, Syncer} -> EmitStats({syncing, 0}),
+ master_batch_go0(Args, SyncBatchSize,
+ BQ, BQS)
+ end.
+
+master_batch_go0(Args, BatchSize, BQ, BQS) ->
+ FoldFun =
+ fun (Msg, MsgProps, Unacked, Acc) ->
+ Acc1 = append_to_acc(Msg, MsgProps, Unacked, Acc),
+ case maybe_master_batch_send(Acc1, BatchSize) of
+ true -> master_batch_send(Args, Acc1);
+ false -> {cont, Acc1}
+ end
+ end,
+ FoldAcc = {[], 0, {0, BQ:depth(BQS)}, erlang:monotonic_time()},
+ bq_fold(FoldFun, FoldAcc, Args, BQ, BQS).
+
+master_batch_send({Syncer, Ref, Log, HandleInfo, EmitStats, Parent},
+ {Batch, I, {Curr, Len}, Last}) ->
+ T = maybe_emit_stats(Last, I, EmitStats, Log),
+ HandleInfo({syncing, I}),
+ handle_set_maximum_since_use(),
+ SyncMsg = {msgs, Ref, lists:reverse(Batch)},
+ NewAcc = {[], I + length(Batch), {Curr, Len}, T},
+ master_send_receive(SyncMsg, NewAcc, Syncer, Ref, Parent).
+
+%% Either send messages when we reach the last one in the queue or
+%% whenever we have accumulated BatchSize messages.
+maybe_master_batch_send({_, _, {Len, Len}, _}, _BatchSize) ->
+ true;
+maybe_master_batch_send({_, _, {Curr, _Len}, _}, BatchSize)
+ when Curr rem BatchSize =:= 0 ->
+ true;
+maybe_master_batch_send(_Acc, _BatchSize) ->
+ false.
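+
+%% Worked example (illustrative numbers only): with BatchSize = 3 and a
+%% queue depth Len = 7, maybe_master_batch_send/2 returns true when Curr is
+%% 3 and 6 (the "Curr rem BatchSize" clause) and again at 7 (the {Len, Len}
+%% clause), so the master sends batches of 3, 3 and 1 messages to the
+%% syncer.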
+
+bq_fold(FoldFun, FoldAcc, Args, BQ, BQS) ->
+ case BQ:fold(FoldFun, FoldAcc, BQS) of
+ {{shutdown, Reason}, BQS1} -> {shutdown, Reason, BQS1};
+ {{sync_died, Reason}, BQS1} -> {sync_died, Reason, BQS1};
+ {_, BQS1} -> master_done(Args, BQS1)
+ end.
+
+append_to_acc(Msg, MsgProps, Unacked, {Batch, I, {Curr, Len}, T}) ->
+ {[{Msg, MsgProps, Unacked} | Batch], I, {Curr + 1, Len}, T}.
+
+master_send_receive(SyncMsg, NewAcc, Syncer, Ref, Parent) ->
+ receive
+ {'$gen_call', From,
+ cancel_sync_mirrors} -> stop_syncer(Syncer, {cancel, Ref}),
+ gen_server2:reply(From, ok),
+ {stop, cancelled};
+ {next, Ref} -> Syncer ! SyncMsg,
+ {cont, NewAcc};
+ {'EXIT', Parent, Reason} -> {stop, {shutdown, Reason}};
+ {'EXIT', Syncer, Reason} -> {stop, {sync_died, Reason}}
+ end.
+
+master_done({Syncer, Ref, _Log, _HandleInfo, _EmitStats, Parent}, BQS) ->
+ receive
+ {'$gen_call', From,
+ cancel_sync_mirrors} ->
+ stop_syncer(Syncer, {cancel, Ref}),
+ gen_server2:reply(From, ok),
+ {cancelled, BQS};
+ {cancelled, Ref} ->
+ {cancelled, BQS};
+ {next, Ref} ->
+ stop_syncer(Syncer, {done, Ref}),
+ {ok, BQS};
+ {'EXIT', Parent, Reason} ->
+ {shutdown, Reason, BQS};
+ {'EXIT', Syncer, Reason} ->
+ {sync_died, Reason, BQS}
+ end.
+
+stop_syncer(Syncer, Msg) ->
+ unlink(Syncer),
+ Syncer ! Msg,
+ receive {'EXIT', Syncer, _} -> ok
+ after 0 -> ok
+ end.
+
+maybe_emit_stats(Last, I, EmitStats, Log) ->
+ Interval = erlang:convert_time_unit(
+ erlang:monotonic_time() - Last, native, micro_seconds),
+ case Interval > ?SYNC_PROGRESS_INTERVAL of
+ true -> EmitStats({syncing, I}),
+ Log("~p messages", [I]),
+ erlang:monotonic_time();
+ false -> Last
+ end.
+
+handle_set_maximum_since_use() ->
+ receive
+ {'$gen_cast', {set_maximum_since_use, Age}} ->
+ ok = file_handle_cache:set_maximum_since_use(Age)
+ after 0 ->
+ ok
+ end.
+
+%% Master
+%% ---------------------------------------------------------------------------
+%% Syncer
+
+syncer(Ref, Log, MPid, SPids) ->
+ [erlang:monitor(process, SPid) || SPid <- SPids],
+ %% We wait for a reply from the mirrors so that we know they are in
+ %% a receive block and will thus receive messages we send to them
+ %% *without* those messages ending up in their gen_server2 pqueue.
+ case await_slaves(Ref, SPids) of
+ [] -> Log("all mirrors already synced", []);
+ SPids1 -> MPid ! {ready, self()},
+ Log("mirrors ~p to sync", [[node(SPid) || SPid <- SPids1]]),
+ syncer_check_resources(Ref, MPid, SPids1)
+ end.
+
+await_slaves(Ref, SPids) ->
+ [SPid || SPid <- SPids,
+ rabbit_mnesia:on_running_node(SPid) andalso %% [0]
+ receive
+ {sync_ready, Ref, SPid} -> true;
+ {sync_deny, Ref, SPid} -> false;
+ {'DOWN', _, process, SPid, _} -> false
+ end].
+%% [0] This check is in case there's been a partition which has then
+%% healed in between the master retrieving the mirror pids from Mnesia
+%% and sending 'sync_start' over GM. If so there might be mirrors on the
+%% other side of the partition which we can monitor (since they have
+%% rejoined the distributed system with us) but which did not get the
+%% 'sync_start' and so will not reply. We need to act as though they are
+%% down.
+
+syncer_check_resources(Ref, MPid, SPids) ->
+ rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
+ %% Before we ask the master node to send the first batch of messages
+ %% over here, we check if one node is already short on memory. If
+ %% that's the case, we wait for the alarm to be cleared before
+ %% starting the syncer loop.
+ AlarmedNodes = lists:any(
+ fun
+ ({{resource_limit, memory, _}, _}) -> true;
+ ({_, _}) -> false
+ end, rabbit_alarm:get_alarms()),
+ if
+ not AlarmedNodes ->
+ MPid ! {next, Ref},
+ syncer_loop(Ref, MPid, SPids);
+ true ->
+ case wait_for_resources(Ref, SPids) of
+ cancel -> MPid ! {cancelled, Ref};
+ SPids1 -> MPid ! {next, Ref},
+ syncer_loop(Ref, MPid, SPids1)
+ end
+ end.
+
+syncer_loop(Ref, MPid, SPids) ->
+ receive
+ {conserve_resources, memory, true} ->
+ case wait_for_resources(Ref, SPids) of
+ cancel -> MPid ! {cancelled, Ref};
+ SPids1 -> syncer_loop(Ref, MPid, SPids1)
+ end;
+ {conserve_resources, _, _} ->
+ %% Ignore other alerts.
+ syncer_loop(Ref, MPid, SPids);
+ {msgs, Ref, Msgs} ->
+ SPids1 = wait_for_credit(SPids),
+ case SPids1 of
+ [] ->
+ % Die silently because there are no mirrors left.
+ ok;
+ _ ->
+ broadcast(SPids1, {sync_msgs, Ref, Msgs}),
+ MPid ! {next, Ref},
+ syncer_loop(Ref, MPid, SPids1)
+ end;
+ {cancel, Ref} ->
+ %% We don't tell the mirrors we will die - so when we do
+ %% they interpret that as a failure, which is what we
+ %% want.
+ ok;
+ {done, Ref} ->
+ [SPid ! {sync_complete, Ref} || SPid <- SPids]
+ end.
+
+broadcast(SPids, Msg) ->
+ [begin
+ credit_flow:send(SPid),
+ SPid ! Msg
+ end || SPid <- SPids].
+
+conserve_resources(Pid, Source, {_, Conserve, _}) ->
+ Pid ! {conserve_resources, Source, Conserve},
+ ok.
+
+wait_for_credit(SPids) ->
+ case credit_flow:blocked() of
+ true -> receive
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg),
+ wait_for_credit(SPids);
+ {'DOWN', _, process, SPid, _} ->
+ credit_flow:peer_down(SPid),
+ wait_for_credit(lists:delete(SPid, SPids))
+ end;
+ false -> SPids
+ end.
+
+wait_for_resources(Ref, SPids) ->
+ receive
+ {conserve_resources, memory, false} ->
+ SPids;
+ {conserve_resources, _, _} ->
+ %% Ignore other alerts.
+ wait_for_resources(Ref, SPids);
+ {cancel, Ref} ->
+ %% We don't tell the mirrors we will die - so when we do
+ %% they interpret that as a failure, which is what we
+ %% want.
+ cancel;
+ {'DOWN', _, process, SPid, _} ->
+ credit_flow:peer_down(SPid),
+ SPids1 = wait_for_credit(lists:delete(SPid, SPids)),
+ wait_for_resources(Ref, SPids1)
+ end.
+
+%% Syncer
+%% ---------------------------------------------------------------------------
+%% Slave
+
+-spec slave(non_neg_integer(), reference(), timer:tref(), pid(),
+ bq(), bqs(), fun((bq(), bqs()) -> {timer:tref(), bqs()})) ->
+ 'denied' |
+ {'ok' | 'failed', slave_sync_state()} |
+ {'stop', any(), slave_sync_state()}.
+
+slave(0, Ref, _TRef, Syncer, _BQ, _BQS, _UpdateRamDuration) ->
+ Syncer ! {sync_deny, Ref, self()},
+ denied;
+
+slave(_DD, Ref, TRef, Syncer, BQ, BQS, UpdateRamDuration) ->
+ MRef = erlang:monitor(process, Syncer),
+ Syncer ! {sync_ready, Ref, self()},
+ {_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)),
+ slave_sync_loop({Ref, MRef, Syncer, BQ, UpdateRamDuration,
+ rabbit_misc:get_parent()}, {[], TRef, BQS1}).
+
+slave_sync_loop(Args = {Ref, MRef, Syncer, BQ, UpdateRamDuration, Parent},
+ State = {MA, TRef, BQS}) ->
+ receive
+ {'DOWN', MRef, process, Syncer, _Reason} ->
+ %% If the master dies half way we are not in the usual
+ %% half-synced state (with messages nearer the tail of the
+ %% queue); instead we have ones nearer the head. If we then
+ %% sync with a newly promoted master, or even just receive
+ %% messages from it, we have a hole in the middle. So the
+ %% only thing to do here is purge.
+ {_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)),
+ credit_flow:peer_down(Syncer),
+ {failed, {[], TRef, BQS1}};
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg),
+ slave_sync_loop(Args, State);
+ {sync_complete, Ref} ->
+ erlang:demonitor(MRef, [flush]),
+ credit_flow:peer_down(Syncer),
+ {ok, State};
+ {'$gen_cast', {set_maximum_since_use, Age}} ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ slave_sync_loop(Args, State);
+ {'$gen_cast', {set_ram_duration_target, Duration}} ->
+ BQS1 = BQ:set_ram_duration_target(Duration, BQS),
+ slave_sync_loop(Args, {MA, TRef, BQS1});
+ {'$gen_cast', {run_backing_queue, Mod, Fun}} ->
+ BQS1 = BQ:invoke(Mod, Fun, BQS),
+ slave_sync_loop(Args, {MA, TRef, BQS1});
+ update_ram_duration ->
+ {TRef1, BQS1} = UpdateRamDuration(BQ, BQS),
+ slave_sync_loop(Args, {MA, TRef1, BQS1});
+ {sync_msgs, Ref, Batch} ->
+ credit_flow:ack(Syncer),
+ {MA1, BQS1} = process_batch(Batch, MA, BQ, BQS),
+ slave_sync_loop(Args, {MA1, TRef, BQS1});
+ {'EXIT', Parent, Reason} ->
+ {stop, Reason, State};
+ %% If the master throws an exception
+ {'$gen_cast', {gm, {delete_and_terminate, Reason}}} ->
+ BQ:delete_and_terminate(Reason, BQS),
+ {stop, Reason, {[], TRef, undefined}}
+ end.
+
+%% We are partitioning messages by the Unacked element in the tuple.
+%% When Unacked = true it's a publish_delivered message,
+%% otherwise it's a publish message.
+%%
+%% Note that we can't first partition the batch and then publish each
+%% part, since that would result in re-ordering messages, which we
+%% don't want to do.
+process_batch([], MA, _BQ, BQS) ->
+ {MA, BQS};
+process_batch(Batch, MA, BQ, BQS) ->
+ {_Msg, _MsgProps, Unacked} = hd(Batch),
+ process_batch(Batch, Unacked, [], MA, BQ, BQS).
+
+process_batch([{Msg, Props, true = Unacked} | Rest], true = Unacked,
+ Acc, MA, BQ, BQS) ->
+ %% publish_delivered messages don't need the IsDelivered flag,
+ %% therefore we just add {Msg, Props} to the accumulator.
+ process_batch(Rest, Unacked, [{Msg, props(Props)} | Acc],
+ MA, BQ, BQS);
+process_batch([{Msg, Props, false = Unacked} | Rest], false = Unacked,
+ Acc, MA, BQ, BQS) ->
+ %% publish messages need the IsDelivered flag, which is set to true
+ %% here.
+ process_batch(Rest, Unacked, [{Msg, props(Props), true} | Acc],
+ MA, BQ, BQS);
+process_batch(Batch, Unacked, Acc, MA, BQ, BQS) ->
+ {MA1, BQS1} = publish_batch(Unacked, lists:reverse(Acc), MA, BQ, BQS),
+ process_batch(Batch, MA1, BQ, BQS1).
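+
+%% Illustrative example (hypothetical batch, not part of the original
+%% module): given [{M1, P1, true}, {M2, P2, true}, {M3, P3, false}],
+%% process_batch/4 first publishes [{M1, props(P1)}, {M2, props(P2)}] via
+%% batch_publish_delivered (messages pending acks on the master) and then
+%% [{M3, props(P3), true}] via batch_publish, preserving the original
+%% message order.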
+
+%% Unacked msgs are published via batch_publish.
+publish_batch(false, Batch, MA, BQ, BQS) ->
+ batch_publish(Batch, MA, BQ, BQS);
+%% Acked msgs are published via batch_publish_delivered.
+publish_batch(true, Batch, MA, BQ, BQS) ->
+ batch_publish_delivered(Batch, MA, BQ, BQS).
+
+
+batch_publish(Batch, MA, BQ, BQS) ->
+ BQS1 = BQ:batch_publish(Batch, none, noflow, BQS),
+ {MA, BQS1}.
+
+batch_publish_delivered(Batch, MA, BQ, BQS) ->
+ {AckTags, BQS1} = BQ:batch_publish_delivered(Batch, none, noflow, BQS),
+ MA1 = BQ:zip_msgs_and_acks(Batch, AckTags, MA, BQS1),
+ {MA1, BQS1}.
+
+props(Props) ->
+ Props#message_properties{needs_confirming = false}.
diff --git a/deps/rabbit/src/rabbit_mnesia.erl b/deps/rabbit/src/rabbit_mnesia.erl
new file mode 100644
index 0000000000..070c6a8205
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mnesia.erl
@@ -0,0 +1,1117 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mnesia).
+
+-export([%% Main interface
+ init/0,
+ join_cluster/2,
+ reset/0,
+ force_reset/0,
+ update_cluster_nodes/1,
+ change_cluster_node_type/1,
+ forget_cluster_node/2,
+ force_load_next_boot/0,
+
+ %% Various queries to get the status of the db
+ status/0,
+ is_clustered/0,
+ on_running_node/1,
+ is_process_alive/1,
+ is_registered_process_alive/1,
+ cluster_nodes/1,
+ node_type/0,
+ dir/0,
+ cluster_status_from_mnesia/0,
+
+ %% Operations on the db and utils, mainly used in `rabbit_upgrade' and `rabbit'
+ init_db_unchecked/2,
+ copy_db/1,
+ check_cluster_consistency/0,
+ ensure_mnesia_dir/0,
+
+ %% Hooks used in `rabbit_node_monitor'
+ on_node_up/1,
+ on_node_down/1,
+
+ %% Helpers for diagnostics commands
+ schema_info/1
+ ]).
+
+%% Used internally in rpc calls
+-export([node_info/0, remove_node_if_mnesia_running/1]).
+
+-ifdef(TEST).
+-compile(export_all).
+-export([init_with_lock/3]).
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-export_type([node_type/0, cluster_status/0]).
+
+-type node_type() :: disc | ram.
+-type cluster_status() :: {[node()], [node()], [node()]}.
+
+%%----------------------------------------------------------------------------
+%% Main interface
+%%----------------------------------------------------------------------------
+
+-spec init() -> 'ok'.
+
+init() ->
+ ensure_mnesia_running(),
+ ensure_mnesia_dir(),
+ case is_virgin_node() of
+ true ->
+ rabbit_log:info("Node database directory at ~ts is empty. "
+ "Assuming we need to join an existing cluster or initialise from scratch...~n",
+ [dir()]),
+ rabbit_peer_discovery:log_configured_backend(),
+ rabbit_peer_discovery:maybe_init(),
+ init_with_lock();
+ false ->
+ NodeType = node_type(),
+ init_db_and_upgrade(cluster_nodes(all), NodeType,
+ NodeType =:= ram, _Retry = true),
+ rabbit_peer_discovery:maybe_init(),
+ rabbit_peer_discovery:maybe_register()
+ end,
+ %% We intuitively expect the global name server to be synced when
+ %% Mnesia is up. In fact that's not guaranteed to be the case -
+ %% let's make it so.
+ ok = rabbit_node_monitor:global_sync(),
+ ok.
+
+init_with_lock() ->
+ {Retries, Timeout} = rabbit_peer_discovery:locking_retry_timeout(),
+ init_with_lock(Retries, Timeout, fun run_peer_discovery/0).
+
+init_with_lock(0, _, RunPeerDiscovery) ->
+ case rabbit_peer_discovery:lock_acquisition_failure_mode() of
+ ignore ->
+ rabbit_log:warning("Could not acquire a peer discovery lock, out of retries", []),
+ RunPeerDiscovery(),
+ rabbit_peer_discovery:maybe_register();
+ fail ->
+ exit(cannot_acquire_startup_lock)
+ end;
+init_with_lock(Retries, Timeout, RunPeerDiscovery) ->
+ LockResult = rabbit_peer_discovery:lock(),
+ rabbit_log:debug("rabbit_peer_discovery:lock returned ~p", [LockResult]),
+ case LockResult of
+ not_supported ->
+ rabbit_log:info("Peer discovery backend does not support locking, falling back to randomized delay"),
+ %% See rabbitmq/rabbitmq-server#1202 for details.
+ rabbit_peer_discovery:maybe_inject_randomized_delay(),
+ RunPeerDiscovery(),
+ rabbit_peer_discovery:maybe_register();
+ {error, _Reason} ->
+ timer:sleep(Timeout),
+ init_with_lock(Retries - 1, Timeout, RunPeerDiscovery);
+ {ok, Data} ->
+ try
+ RunPeerDiscovery(),
+ rabbit_peer_discovery:maybe_register()
+ after
+ rabbit_peer_discovery:unlock(Data)
+ end
+ end.
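+
+%% Illustrative sketch (hypothetical numbers): if locking_retry_timeout/0
+%% returned {10, 30000}, an {error, _} result from rabbit_peer_discovery:lock/0
+%% would be retried every 30 seconds up to 10 times; once the retries are
+%% exhausted, lock_acquisition_failure_mode() decides between carrying on
+%% without the lock (ignore) and exiting (fail).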
+
+-spec run_peer_discovery() -> ok | {[node()], node_type()}.
+run_peer_discovery() ->
+ {RetriesLeft, DelayInterval} = rabbit_peer_discovery:discovery_retries(),
+ run_peer_discovery_with_retries(RetriesLeft, DelayInterval).
+
+-spec run_peer_discovery_with_retries(non_neg_integer(), non_neg_integer()) -> ok | {[node()], node_type()}.
+run_peer_discovery_with_retries(0, _DelayInterval) ->
+ ok;
+run_peer_discovery_with_retries(RetriesLeft, DelayInterval) ->
+ FindBadNodeNames = fun
+ (Name, BadNames) when is_atom(Name) -> BadNames;
+ (Name, BadNames) -> [Name | BadNames]
+ end,
+ {DiscoveredNodes0, NodeType} =
+ case rabbit_peer_discovery:discover_cluster_nodes() of
+ {error, Reason} ->
+ RetriesLeft1 = RetriesLeft - 1,
+ rabbit_log:error("Peer discovery returned an error: ~p. Will retry after a delay of ~b ms, ~b retries left...",
+ [Reason, DelayInterval, RetriesLeft1]),
+ timer:sleep(DelayInterval),
+ run_peer_discovery_with_retries(RetriesLeft1, DelayInterval);
+ {ok, {Nodes, Type} = Config}
+ when is_list(Nodes) andalso (Type == disc orelse Type == disk orelse Type == ram) ->
+ case lists:foldr(FindBadNodeNames, [], Nodes) of
+ [] -> Config;
+ BadNames -> e({invalid_cluster_node_names, BadNames})
+ end;
+ {ok, {_, BadType}} when BadType /= disc andalso BadType /= ram ->
+ e({invalid_cluster_node_type, BadType});
+ {ok, _} ->
+ e(invalid_cluster_nodes_conf)
+ end,
+ DiscoveredNodes = lists:usort(DiscoveredNodes0),
+ rabbit_log:info("All discovered existing cluster peers: ~s~n",
+ [rabbit_peer_discovery:format_discovered_nodes(DiscoveredNodes)]),
+ Peers = nodes_excl_me(DiscoveredNodes),
+ case Peers of
+ [] ->
+ rabbit_log:info("Discovered no peer nodes to cluster with. "
+ "Some discovery backends can filter nodes out based on a readiness criteria. "
+ "Enabling debug logging might help troubleshoot."),
+ init_db_and_upgrade([node()], disc, false, _Retry = true);
+ _ ->
+ rabbit_log:info("Peer nodes we can cluster with: ~s~n",
+ [rabbit_peer_discovery:format_discovered_nodes(Peers)]),
+ join_discovered_peers(Peers, NodeType)
+ end.
+
+%% Attempts to join discovered,
+%% reachable and compatible (in terms of Mnesia internal protocol version and such)
+%% cluster peers in order.
+join_discovered_peers(TryNodes, NodeType) ->
+ {RetriesLeft, DelayInterval} = rabbit_peer_discovery:discovery_retries(),
+ join_discovered_peers_with_retries(TryNodes, NodeType, RetriesLeft, DelayInterval).
+
+join_discovered_peers_with_retries(TryNodes, _NodeType, 0, _DelayInterval) ->
+ rabbit_log:warning(
+ "Could not successfully contact any node of: ~s (as in Erlang distribution). "
+ "Starting as a blank standalone node...~n",
+ [string:join(lists:map(fun atom_to_list/1, TryNodes), ",")]),
+ init_db_and_upgrade([node()], disc, false, _Retry = true);
+join_discovered_peers_with_retries(TryNodes, NodeType, RetriesLeft, DelayInterval) ->
+ case find_reachable_peer_to_cluster_with(nodes_excl_me(TryNodes)) of
+ {ok, Node} ->
+ rabbit_log:info("Node '~s' selected for auto-clustering~n", [Node]),
+ {ok, {_, DiscNodes, _}} = discover_cluster0(Node),
+ init_db_and_upgrade(DiscNodes, NodeType, true, _Retry = true),
+ rabbit_connection_tracking:boot(),
+ rabbit_node_monitor:notify_joined_cluster();
+ none ->
+ RetriesLeft1 = RetriesLeft - 1,
+ rabbit_log:error("Trying to join discovered peers failed. Will retry after a delay of ~b ms, ~b retries left...",
+ [DelayInterval, RetriesLeft1]),
+ timer:sleep(DelayInterval),
+ join_discovered_peers_with_retries(TryNodes, NodeType, RetriesLeft1, DelayInterval)
+ end.
+
+%% Make the node join a cluster. The node will be reset automatically
+%% before we actually cluster it. The nodes provided will be used to
+%% find out about the nodes in the cluster.
+%%
+%% This function will fail if:
+%%
+%% * The node is currently the only disc node of its cluster
+%% * We can't connect to any of the nodes provided
+%% * The node is currently already clustered with the cluster of the nodes
+%% provided
+%%
+%% Note that we make no attempt to verify that the nodes provided are
+%% all in the same cluster; we simply pick the first online node and
+%% join its cluster.
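+%%
+%% Minimal usage sketch (the node name is hypothetical):
+%%
+%%   rabbit_mnesia:join_cluster('rabbit@seed1', ram)
+%%
+%% returns 'ok', {ok, already_member} or {error, {inconsistent_cluster, Msg}},
+%% per the spec below.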
+
+-spec join_cluster(node(), node_type())
+ -> ok | {ok, already_member} | {error, {inconsistent_cluster, string()}}.
+
+join_cluster(DiscoveryNode, NodeType) ->
+ ensure_mnesia_not_running(),
+ ensure_mnesia_dir(),
+ case is_only_clustered_disc_node() of
+ true -> e(clustering_only_disc_node);
+ false -> ok
+ end,
+ {ClusterNodes, _, _} = discover_cluster([DiscoveryNode]),
+ case me_in_nodes(ClusterNodes) of
+ false ->
+ case check_cluster_consistency(DiscoveryNode, false) of
+ {ok, _} ->
+ %% Reset the node. This simplifies things and it
+ %% will be needed in this case - we're joining a new
+ %% cluster with new nodes which are not in sync
+ %% with the current node. It also lifts the burden
+ %% of resetting the node from the user.
+ reset_gracefully(),
+
+ %% Join the cluster
+ rabbit_log:info("Clustering with ~p as ~p node~n",
+ [ClusterNodes, NodeType]),
+ ok = init_db_with_mnesia(ClusterNodes, NodeType,
+ true, true, _Retry = true),
+ rabbit_connection_tracking:boot(),
+ rabbit_node_monitor:notify_joined_cluster(),
+ ok;
+ {error, Reason} ->
+ {error, Reason}
+ end;
+ true ->
+ %% DiscoveryNode thinks that we are part of a cluster, but
+ %% do we think so ourselves?
+ case are_we_clustered_with(DiscoveryNode) of
+ true ->
+ rabbit_log:info("Asked to join a cluster but already a member of it: ~p~n", [ClusterNodes]),
+ {ok, already_member};
+ false ->
+ Msg = format_inconsistent_cluster_message(DiscoveryNode, node()),
+ rabbit_log:error(Msg),
+ {error, {inconsistent_cluster, Msg}}
+ end
+ end.
+
+%% Return the node to its virgin state, where it is not a member of any
+%% cluster, has no cluster configuration, no local database, and no
+%% persisted messages.
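+%%
+%% Illustrative difference between the two entry points below:
+%% `rabbit_mnesia:reset()' leaves the cluster gracefully and refuses to run
+%% when this node is the only disc node of its cluster, while
+%% `rabbit_mnesia:force_reset()' wipes the node without consulting its peers.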
+
+-spec reset() -> 'ok'.
+
+reset() ->
+ ensure_mnesia_not_running(),
+ rabbit_log:info("Resetting Rabbit~n", []),
+ reset_gracefully().
+
+-spec force_reset() -> 'ok'.
+
+force_reset() ->
+ ensure_mnesia_not_running(),
+ rabbit_log:info("Resetting Rabbit forcefully~n", []),
+ wipe().
+
+reset_gracefully() ->
+ AllNodes = cluster_nodes(all),
+ %% Reconnecting so that we will get an up-to-date list of nodes. We don't
+ %% need to check for consistency because we are resetting.
+ %% Force=true here so that reset still works when clustered with a
+ %% node which is down.
+ init_db_with_mnesia(AllNodes, node_type(), false, false, _Retry = false),
+ case is_only_clustered_disc_node() of
+ true -> e(resetting_only_disc_node);
+ false -> ok
+ end,
+ leave_cluster(),
+ rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema),
+ wipe().
+
+wipe() ->
+ %% We need to make sure that we don't end up in a distributed
+ %% Erlang system with nodes while not being in an Mnesia cluster
+ %% with them. We don't handle that well.
+ [erlang:disconnect_node(N) || N <- cluster_nodes(all)],
+ %% remove persisted messages and any other garbage we find
+ ok = rabbit_file:recursive_delete(filelib:wildcard(dir() ++ "/*")),
+ ok = rabbit_node_monitor:reset_cluster_status(),
+ ok.
+
+-spec change_cluster_node_type(node_type()) -> 'ok'.
+
+change_cluster_node_type(Type) ->
+ ensure_mnesia_not_running(),
+ ensure_mnesia_dir(),
+ case is_clustered() of
+ false -> e(not_clustered);
+ true -> ok
+ end,
+ {_, _, RunningNodes} = discover_cluster(cluster_nodes(all)),
+ %% We might still be marked as running by a remote node since the
+ %% information about us going down might not have propagated yet.
+ Node = case RunningNodes -- [node()] of
+ [] -> e(no_online_cluster_nodes);
+ [Node0|_] -> Node0
+ end,
+ ok = reset(),
+ ok = join_cluster(Node, Type).
+
+-spec update_cluster_nodes(node()) -> 'ok'.
+
+update_cluster_nodes(DiscoveryNode) ->
+ ensure_mnesia_not_running(),
+ ensure_mnesia_dir(),
+ Status = {AllNodes, _, _} = discover_cluster([DiscoveryNode]),
+ case me_in_nodes(AllNodes) of
+ true ->
+ %% As in `check_consistency/0', we can safely delete the
+ %% schema here, since it'll be replicated from the other
+ %% nodes
+ mnesia:delete_schema([node()]),
+ rabbit_node_monitor:write_cluster_status(Status),
+ rabbit_log:info("Updating cluster nodes from ~p~n",
+ [DiscoveryNode]),
+ init_db_with_mnesia(AllNodes, node_type(), true, true, _Retry = false);
+ false ->
+ e(inconsistent_cluster)
+ end,
+ ok.
+
+%% We proceed like this: try to remove the node locally. If the node
+%% is offline, we remove the node if:
+%% * This node is a disc node
+%% * All other nodes are offline
+%% * This node was, to the best of our knowledge (see comment below),
+%% the last node, or the second-to-last after the node we're removing,
+%% to go down
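+%%
+%% Illustrative call (the node name is hypothetical): forgetting a peer
+%% while this node is running
+%%
+%%   ok = rabbit_mnesia:forget_cluster_node('rabbit@gone', false)
+%%
+%% Passing 'true' as the second argument instead performs the removal from
+%% a stopped node, subject to the conditions listed above.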
+
+-spec forget_cluster_node(node(), boolean()) -> 'ok'.
+
+forget_cluster_node(Node, RemoveWhenOffline) ->
+ forget_cluster_node(Node, RemoveWhenOffline, true).
+
+forget_cluster_node(Node, RemoveWhenOffline, EmitNodeDeletedEvent) ->
+ case lists:member(Node, cluster_nodes(all)) of
+ true -> ok;
+ false -> e(not_a_cluster_node)
+ end,
+ case {RemoveWhenOffline, is_running()} of
+ {true, false} -> remove_node_offline_node(Node);
+ {true, true} -> e(online_node_offline_flag);
+ {false, false} -> e(offline_node_no_offline_flag);
+ {false, true} -> rabbit_log:info(
+ "Removing node ~p from cluster~n", [Node]),
+ case remove_node_if_mnesia_running(Node) of
+ ok when EmitNodeDeletedEvent ->
+ rabbit_event:notify(node_deleted, [{node, Node}]),
+ ok;
+ ok -> ok;
+ {error, _} = Err -> throw(Err)
+ end
+ end.
+
+remove_node_offline_node(Node) ->
+ %% Here `mnesia:system_info(running_db_nodes)' will RPC, but that's what we
+ %% want - we need to know the running nodes *now*. If the current node is a
+ %% RAM node it will return bogus results, but we don't care since we only do
+ %% this operation from disc nodes.
+ case {mnesia:system_info(running_db_nodes) -- [Node], node_type()} of
+ {[], disc} ->
+ start_mnesia(),
+ try
+ %% What we want to do here is replace the last node to
+ %% go down with the current node. The way we do this
+ %% is by force loading the tables, and making sure that
+ %% they are loaded.
+ rabbit_table:force_load(),
+ rabbit_table:wait_for_replicated(_Retry = false),
+ %% We skip the 'node_deleted' event because the
+ %% application is stopped and thus, rabbit_event is not
+ %% enabled.
+ forget_cluster_node(Node, false, false),
+ force_load_next_boot()
+ after
+ stop_mnesia()
+ end;
+ {_, _} ->
+ e(removing_node_from_offline_node)
+ end.
+
+%%----------------------------------------------------------------------------
+%% Queries
+%%----------------------------------------------------------------------------
+
+-spec status() -> [{'nodes', [{node_type(), [node()]}]} |
+ {'running_nodes', [node()]} |
+ {'partitions', [{node(), [node()]}]}].
+
+status() ->
+ IfNonEmpty = fun (_, []) -> [];
+ (Type, Nodes) -> [{Type, Nodes}]
+ end,
+ [{nodes, (IfNonEmpty(disc, cluster_nodes(disc)) ++
+ IfNonEmpty(ram, cluster_nodes(ram)))}] ++
+ case is_running() of
+ true -> RunningNodes = cluster_nodes(running),
+ [{running_nodes, RunningNodes},
+ {cluster_name, rabbit_nodes:cluster_name()},
+ {partitions, mnesia_partitions(RunningNodes)}];
+ false -> []
+ end.
+
+mnesia_partitions(Nodes) ->
+ Replies = rabbit_node_monitor:partitions(Nodes),
+ [Reply || Reply = {_, R} <- Replies, R =/= []].
+
+is_running() -> mnesia:system_info(is_running) =:= yes.
+
+-spec is_clustered() -> boolean().
+
+is_clustered() -> AllNodes = cluster_nodes(all),
+ AllNodes =/= [] andalso AllNodes =/= [node()].
+
+-spec on_running_node(pid()) -> boolean().
+
+on_running_node(Pid) -> lists:member(node(Pid), cluster_nodes(running)).
+
+%% This requires the process be in the same running cluster as us
+%% (i.e. not partitioned or some random node).
+%%
+%% See also rabbit_misc:is_process_alive/1 which does not.
+
+-spec is_process_alive(pid() | {atom(), node()}) -> boolean().
+
+is_process_alive(Pid) when is_pid(Pid) ->
+ on_running_node(Pid) andalso
+ rpc:call(node(Pid), erlang, is_process_alive, [Pid]) =:= true;
+is_process_alive({Name, Node}) ->
+ lists:member(Node, cluster_nodes(running)) andalso
+ rpc:call(Node, rabbit_mnesia, is_registered_process_alive, [Name]) =:= true.
+
+-spec is_registered_process_alive(atom()) -> boolean().
+
+is_registered_process_alive(Name) ->
+ is_pid(whereis(Name)).
+
+-spec cluster_nodes('all' | 'disc' | 'ram' | 'running') -> [node()].
+
+cluster_nodes(WhichNodes) -> cluster_status(WhichNodes).
+
+%% This function is the actual source of information, since it gets
+%% the data from mnesia. Obviously it'll work only when mnesia is
+%% running.
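+%%
+%% An illustrative (hypothetical) result shape when mnesia is running:
+%%
+%%   {ok, {['rabbit@a', 'rabbit@b'], ['rabbit@a'], ['rabbit@a', 'rabbit@b']}}
+%%
+%% i.e. {ok, {AllNodes, DiscNodes, RunningNodes}}.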
+
+-spec cluster_status_from_mnesia() -> rabbit_types:ok_or_error2(
+ cluster_status(), any()).
+
+cluster_status_from_mnesia() ->
+ case is_running() of
+ false ->
+ {error, mnesia_not_running};
+ true ->
+ %% If the tables are not present, it means that
+ %% `init_db/3' hasn't been run yet. In other words, either
+ %% we are a virgin node or a restarted RAM node. In both
+ %% cases we're not interested in what mnesia has to say.
+ NodeType = case mnesia:system_info(use_dir) of
+ true -> disc;
+ false -> ram
+ end,
+ case rabbit_table:is_present() of
+ true -> AllNodes = mnesia:system_info(db_nodes),
+ DiscCopies = mnesia:table_info(schema, disc_copies),
+ DiscNodes = case NodeType of
+ disc -> nodes_incl_me(DiscCopies);
+ ram -> DiscCopies
+ end,
+ %% `mnesia:system_info(running_db_nodes)' is safe since
+ %% we know that mnesia is running
+ RunningNodes = mnesia:system_info(running_db_nodes),
+ {ok, {AllNodes, DiscNodes, RunningNodes}};
+ false -> {error, tables_not_present}
+ end
+ end.
+
+cluster_status(WhichNodes) ->
+ {AllNodes, DiscNodes, RunningNodes} = Nodes =
+ case cluster_status_from_mnesia() of
+ {ok, Nodes0} ->
+ Nodes0;
+ {error, _Reason} ->
+ {AllNodes0, DiscNodes0, RunningNodes0} =
+ rabbit_node_monitor:read_cluster_status(),
+ %% The cluster status file records the status when the node is
+ %% online, but we know for sure that the node is offline now, so
+ %% we can remove it from the list of running nodes.
+ {AllNodes0, DiscNodes0, nodes_excl_me(RunningNodes0)}
+ end,
+ case WhichNodes of
+ status -> Nodes;
+ all -> AllNodes;
+ disc -> DiscNodes;
+ ram -> AllNodes -- DiscNodes;
+ running -> RunningNodes
+ end.
+
+node_info() ->
+ {rabbit_misc:otp_release(), rabbit_misc:version(),
+ mnesia:system_info(protocol_version),
+ cluster_status_from_mnesia()}.
+
+-spec node_type() -> node_type().
+
+node_type() ->
+ {_AllNodes, DiscNodes, _RunningNodes} =
+ rabbit_node_monitor:read_cluster_status(),
+ case DiscNodes =:= [] orelse me_in_nodes(DiscNodes) of
+ true -> disc;
+ false -> ram
+ end.
+
+-spec dir() -> file:filename().
+
+dir() -> mnesia:system_info(directory).
+
+%%----------------------------------------------------------------------------
+%% Operations on the db
+%%----------------------------------------------------------------------------
+
+%% Adds the provided nodes to the mnesia cluster, creating a new
+%% schema if needed and catching up if there are other
+%% nodes in the cluster already. It also updates the cluster status
+%% file.
+init_db(ClusterNodes, NodeType, CheckOtherNodes) ->
+ NodeIsVirgin = is_virgin_node(),
+ rabbit_log:debug("Does data directory looks like that of a blank (uninitialised) node? ~p", [NodeIsVirgin]),
+ Nodes = change_extra_db_nodes(ClusterNodes, CheckOtherNodes),
+ %% Note that we use `system_info' here and not the cluster status
+ %% since when we start rabbit for the first time the cluster
+ %% status will say we are a disc node but the tables won't be
+ %% present yet.
+ WasDiscNode = mnesia:system_info(use_dir),
+ case {Nodes, WasDiscNode, NodeType} of
+ {[], _, ram} ->
+ %% Standalone ram node, we don't want that
+ throw({error, cannot_create_standalone_ram_node});
+ {[], false, disc} ->
+ %% RAM -> disc, starting from scratch
+ ok = create_schema();
+ {[], true, disc} ->
+ %% First disc node up
+ maybe_force_load(),
+ ok;
+ {[_ | _], _, _} ->
+ %% Subsequent node in cluster, catch up
+ maybe_force_load(),
+ ok = rabbit_table:wait_for_replicated(_Retry = true),
+ ok = rabbit_table:ensure_local_copies(NodeType)
+ end,
+ ensure_feature_flags_are_in_sync(Nodes, NodeIsVirgin),
+ ensure_schema_integrity(),
+ rabbit_node_monitor:update_cluster_status(),
+ ok.
+
+-spec init_db_unchecked([node()], node_type()) -> 'ok'.
+
+init_db_unchecked(ClusterNodes, NodeType) ->
+ init_db(ClusterNodes, NodeType, false).
+
+init_db_and_upgrade(ClusterNodes, NodeType, CheckOtherNodes, Retry) ->
+ ok = init_db(ClusterNodes, NodeType, CheckOtherNodes),
+ ok = case rabbit_upgrade:maybe_upgrade_local() of
+ ok -> ok;
+ starting_from_scratch -> rabbit_version:record_desired();
+ version_not_available -> schema_ok_or_move()
+ end,
+ %% `maybe_upgrade_local' restarts mnesia, so ram nodes will forget
+ %% about the cluster
+ case NodeType of
+ ram -> start_mnesia(),
+ change_extra_db_nodes(ClusterNodes, false);
+ disc -> ok
+ end,
+ %% ...and all nodes will need to wait for tables
+ rabbit_table:wait_for_replicated(Retry),
+ ok.
+
+init_db_with_mnesia(ClusterNodes, NodeType,
+ CheckOtherNodes, CheckConsistency, Retry) ->
+ start_mnesia(CheckConsistency),
+ try
+ init_db_and_upgrade(ClusterNodes, NodeType, CheckOtherNodes, Retry)
+ after
+ stop_mnesia()
+ end.
+
+-spec ensure_mnesia_dir() -> 'ok'.
+
+ensure_mnesia_dir() ->
+ MnesiaDir = dir() ++ "/",
+ case filelib:ensure_dir(MnesiaDir) of
+ {error, Reason} ->
+ throw({error, {cannot_create_mnesia_dir, MnesiaDir, Reason}});
+ ok ->
+ ok
+ end.
+
+ensure_mnesia_running() ->
+ case mnesia:system_info(is_running) of
+ yes ->
+ ok;
+ starting ->
+ wait_for(mnesia_running),
+ ensure_mnesia_running();
+ Reason when Reason =:= no; Reason =:= stopping ->
+ throw({error, mnesia_not_running})
+ end.
+
+ensure_mnesia_not_running() ->
+ case mnesia:system_info(is_running) of
+ no ->
+ ok;
+ stopping ->
+ wait_for(mnesia_not_running),
+ ensure_mnesia_not_running();
+ Reason when Reason =:= yes; Reason =:= starting ->
+ throw({error, mnesia_unexpectedly_running})
+ end.
+
+ensure_feature_flags_are_in_sync(Nodes, NodeIsVirgin) ->
+ Ret = rabbit_feature_flags:sync_feature_flags_with_cluster(
+ Nodes, NodeIsVirgin),
+ case Ret of
+ ok -> ok;
+ {error, Reason} -> throw({error, {incompatible_feature_flags, Reason}})
+ end.
+
+ensure_schema_integrity() ->
+ case rabbit_table:check_schema_integrity(_Retry = true) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ throw({error, {schema_integrity_check_failed, Reason}})
+ end.
+
+-spec copy_db(file:filename()) -> rabbit_types:ok_or_error(any()).
+
+copy_db(Destination) ->
+ ok = ensure_mnesia_not_running(),
+ rabbit_file:recursive_copy(dir(), Destination).
+
+force_load_filename() ->
+ filename:join(dir(), "force_load").
+
+-spec force_load_next_boot() -> 'ok'.
+
+force_load_next_boot() ->
+ rabbit_file:write_file(force_load_filename(), <<"">>).
+
+maybe_force_load() ->
+ case rabbit_file:is_file(force_load_filename()) of
+ true -> rabbit_table:force_load(),
+ rabbit_file:delete(force_load_filename());
+ false -> ok
+ end.
+
+%% This does not guarantee us much, but it avoids some situations that
+%% will definitely end up badly
+
+-spec check_cluster_consistency() -> 'ok'.
+
+check_cluster_consistency() ->
+ %% We want to find 0 or 1 consistent nodes.
+ case lists:foldl(
+ fun (Node, {error, _}) -> check_cluster_consistency(Node, true);
+ (_Node, {ok, Status}) -> {ok, Status}
+ end, {error, not_found}, nodes_excl_me(cluster_nodes(all)))
+ of
+ {ok, Status = {RemoteAllNodes, _, _}} ->
+ case ordsets:is_subset(ordsets:from_list(cluster_nodes(all)),
+ ordsets:from_list(RemoteAllNodes)) of
+ true ->
+ ok;
+ false ->
+ %% We delete the schema here since we think we are
+ %% clustered with nodes that are no longer in the
+ %% cluster and there is no other way to remove
+ %% them from our schema. On the other hand, we are
+ %% sure that there is another online node that we
+ %% can use to sync the tables with. There is a
+ %% race here: if between this check and the
+ %% `init_db' invocation the cluster gets
+ %% disbanded, we're left with a node with no
+ %% mnesia data that will try to connect to offline
+ %% nodes.
+ mnesia:delete_schema([node()])
+ end,
+ rabbit_node_monitor:write_cluster_status(Status);
+ {error, not_found} ->
+ ok;
+ {error, _} = E ->
+ throw(E)
+ end.
+
+check_cluster_consistency(Node, CheckNodesConsistency) ->
+ case remote_node_info(Node) of
+ {badrpc, _Reason} ->
+ {error, not_found};
+ {_OTP, Rabbit, DelegateModuleHash, _Status} when is_binary(DelegateModuleHash) ->
+ %% when a delegate module .beam file hash is present
+ %% in the tuple, we are dealing with an old version
+ rabbit_version:version_error("Rabbit", rabbit_misc:version(), Rabbit);
+ {_OTP, _Rabbit, _Protocol, {error, _}} ->
+ {error, not_found};
+ {OTP, Rabbit, Protocol, {ok, Status}} when CheckNodesConsistency ->
+ case check_consistency(Node, OTP, Rabbit, Protocol, Status) of
+ {error, _} = E -> E;
+ {ok, Res} -> {ok, Res}
+ end;
+ {OTP, Rabbit, Protocol, {ok, Status}} ->
+ case check_consistency(Node, OTP, Rabbit, Protocol) of
+ {error, _} = E -> E;
+ ok -> {ok, Status}
+ end
+ end.
+
+remote_node_info(Node) ->
+ case rpc:call(Node, rabbit_mnesia, node_info, []) of
+ {badrpc, _} = Error -> Error;
+ %% RabbitMQ prior to 3.6.2
+ {OTP, Rabbit, Status} -> {OTP, Rabbit, unsupported, Status};
+ %% RabbitMQ 3.6.2 or later
+ {OTP, Rabbit, Protocol, Status} -> {OTP, Rabbit, Protocol, Status}
+ end.
+
+
+%%--------------------------------------------------------------------
+%% Hooks for `rabbit_node_monitor'
+%%--------------------------------------------------------------------
+
+-spec on_node_up(node()) -> 'ok'.
+
+on_node_up(Node) ->
+ case running_disc_nodes() of
+ [Node] -> rabbit_log:info("cluster contains disc nodes again~n");
+ _ -> ok
+ end.
+
+-spec on_node_down(node()) -> 'ok'.
+
+on_node_down(_Node) ->
+ case running_disc_nodes() of
+ [] -> rabbit_log:info("only running disc node went down~n");
+ _ -> ok
+ end.
+
+running_disc_nodes() ->
+ {_AllNodes, DiscNodes, RunningNodes} = cluster_status(status),
+ ordsets:to_list(ordsets:intersection(ordsets:from_list(DiscNodes),
+ ordsets:from_list(RunningNodes))).
+
+%%--------------------------------------------------------------------
+%% Helpers for diagnostics commands
+%%--------------------------------------------------------------------
+
+schema_info(Items) ->
+ Tables = mnesia:system_info(tables),
+ [info(Table, Items) || Table <- Tables].
+
+info(Table, Items) ->
+ All = [{name, Table} | mnesia:table_info(Table, all)],
+ [{Item, proplists:get_value(Item, All)} || Item <- Items].
+
+%%--------------------------------------------------------------------
+%% Internal helpers
+%%--------------------------------------------------------------------
+
+discover_cluster(Nodes) ->
+ case lists:foldl(fun (_, {ok, Res}) -> {ok, Res};
+ (Node, _) -> discover_cluster0(Node)
+ end, {error, no_nodes_provided}, Nodes) of
+ {ok, Res} -> Res;
+ {error, E} -> throw({error, E});
+ {badrpc, Reason} -> throw({badrpc_multi, Reason, Nodes})
+ end.
+
+discover_cluster0(Node) when Node == node() ->
+ {error, cannot_cluster_node_with_itself};
+discover_cluster0(Node) ->
+ rpc:call(Node, rabbit_mnesia, cluster_status_from_mnesia, []).
+
+schema_ok_or_move() ->
+ case rabbit_table:check_schema_integrity(_Retry = false) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ %% NB: we cannot use rabbit_log here since it may not have been
+ %% started yet
+ rabbit_log:warning("schema integrity check failed: ~p~n"
+ "moving database to backup location "
+ "and recreating schema from scratch~n",
+ [Reason]),
+ ok = move_db(),
+ ok = create_schema()
+ end.
+
+%% We only care about disc nodes since ram nodes are supposed to catch
+%% up only
+create_schema() ->
+ stop_mnesia(),
+ rabbit_log:debug("Will bootstrap a schema database..."),
+ rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema),
+ rabbit_log:debug("Bootstraped a schema database successfully"),
+ start_mnesia(),
+
+ rabbit_log:debug("Will create schema database tables"),
+ ok = rabbit_table:create(),
+ rabbit_log:debug("Created schema database tables successfully"),
+ rabbit_log:debug("Will check schema database integrity..."),
+ ensure_schema_integrity(),
+ rabbit_log:debug("Schema database schema integrity check passed"),
+ ok = rabbit_version:record_desired().
+
+move_db() ->
+ stop_mnesia(),
+ MnesiaDir = filename:dirname(dir() ++ "/"),
+ {{Year, Month, Day}, {Hour, Minute, Second}} = erlang:universaltime(),
+ BackupDir = rabbit_misc:format(
+ "~s_~w~2..0w~2..0w~2..0w~2..0w~2..0w",
+ [MnesiaDir, Year, Month, Day, Hour, Minute, Second]),
+ case file:rename(MnesiaDir, BackupDir) of
+ ok ->
+ %% NB: we cannot use rabbit_log here since it may not have
+ %% been started yet
+ rabbit_log:warning("moved database from ~s to ~s~n",
+ [MnesiaDir, BackupDir]),
+ ok;
+ {error, Reason} -> throw({error, {cannot_backup_mnesia,
+ MnesiaDir, BackupDir, Reason}})
+ end,
+ ensure_mnesia_dir(),
+ start_mnesia(),
+ ok.
+
+remove_node_if_mnesia_running(Node) ->
+ case is_running() of
+ false ->
+ {error, mnesia_not_running};
+ true ->
+ %% Deleting the schema copy of the node will result in
+ %% the node being removed from the cluster, with that
+ %% change being propagated to all nodes
+ case mnesia:del_table_copy(schema, Node) of
+ {atomic, ok} ->
+ rabbit_amqqueue:forget_all_durable(Node),
+ rabbit_node_monitor:notify_left_cluster(Node),
+ ok;
+ {aborted, Reason} ->
+ {error, {failed_to_remove_node, Node, Reason}}
+ end
+ end.
+
+leave_cluster() ->
+ case nodes_excl_me(cluster_nodes(all)) of
+ [] -> ok;
+ AllNodes -> case lists:any(fun leave_cluster/1, AllNodes) of
+ true -> ok;
+ false -> e(no_running_cluster_nodes)
+ end
+ end.
+
+leave_cluster(Node) ->
+ case rpc:call(Node,
+ rabbit_mnesia, remove_node_if_mnesia_running, [node()]) of
+ ok -> true;
+ {error, mnesia_not_running} -> false;
+ {error, Reason} -> throw({error, Reason});
+ {badrpc, nodedown} -> false
+ end.
+
+wait_for(Condition) ->
+ rabbit_log:info("Waiting for ~p...~n", [Condition]),
+ timer:sleep(1000).
+
+start_mnesia(CheckConsistency) ->
+ case CheckConsistency of
+ true -> check_cluster_consistency();
+ false -> ok
+ end,
+ rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ ensure_mnesia_running().
+
+start_mnesia() ->
+ start_mnesia(true).
+
+stop_mnesia() ->
+ stopped = mnesia:stop(),
+ ensure_mnesia_not_running().
+
+change_extra_db_nodes(ClusterNodes0, CheckOtherNodes) ->
+ ClusterNodes = nodes_excl_me(ClusterNodes0),
+ case {mnesia:change_config(extra_db_nodes, ClusterNodes), ClusterNodes} of
+ {{ok, []}, [_|_]} when CheckOtherNodes ->
+ throw({error, {failed_to_cluster_with, ClusterNodes,
+ "Mnesia could not connect to any nodes."}});
+ {{ok, Nodes}, _} ->
+ Nodes
+ end.
+
+check_consistency(Node, OTP, Rabbit, ProtocolVersion) ->
+ rabbit_misc:sequence_error(
+ [check_mnesia_or_otp_consistency(Node, ProtocolVersion, OTP),
+ check_rabbit_consistency(Node, Rabbit)]).
+
+check_consistency(Node, OTP, Rabbit, ProtocolVersion, Status) ->
+ rabbit_misc:sequence_error(
+ [check_mnesia_or_otp_consistency(Node, ProtocolVersion, OTP),
+ check_rabbit_consistency(Node, Rabbit),
+ check_nodes_consistency(Node, Status)]).
+
+check_nodes_consistency(Node, RemoteStatus = {RemoteAllNodes, _, _}) ->
+ case me_in_nodes(RemoteAllNodes) of
+ true ->
+ {ok, RemoteStatus};
+ false ->
+ {error, {inconsistent_cluster,
+ format_inconsistent_cluster_message(node(), Node)}}
+ end.
+
+check_mnesia_or_otp_consistency(_Node, unsupported, OTP) ->
+ rabbit_version:check_otp_consistency(OTP);
+check_mnesia_or_otp_consistency(Node, ProtocolVersion, _) ->
+ check_mnesia_consistency(Node, ProtocolVersion).
+
+check_mnesia_consistency(Node, ProtocolVersion) ->
+ %% If mnesia is running we just check the protocol version.
+ %% If it's not running, we don't want it to join the cluster until all
+ %% checks pass, so we start it without the `dir` env variable to prevent
+ %% it from joining the cluster and/or corrupting data.
+ with_running_or_clean_mnesia(fun() ->
+ case negotiate_protocol([Node]) of
+ [Node] -> ok;
+ [] ->
+ LocalVersion = mnesia:system_info(protocol_version),
+ {error, {inconsistent_cluster,
+ rabbit_misc:format("Mnesia protocol negotiation failed."
+ " Local version: ~p."
+ " Remote version ~p",
+ [LocalVersion, ProtocolVersion])}}
+ end
+ end).
+
+negotiate_protocol([Node]) ->
+ mnesia_monitor:negotiate_protocol([Node]).
+
+with_running_or_clean_mnesia(Fun) ->
+ IsMnesiaRunning = case mnesia:system_info(is_running) of
+ yes -> true;
+ no -> false;
+ stopping ->
+ ensure_mnesia_not_running(),
+ false;
+ starting ->
+ ensure_mnesia_running(),
+ true
+ end,
+ case IsMnesiaRunning of
+ true -> Fun();
+ false ->
+ SavedMnesiaDir = dir(),
+ application:unset_env(mnesia, dir),
+ SchemaLoc = application:get_env(mnesia, schema_location, opt_disc),
+ application:set_env(mnesia, schema_location, ram),
+ mnesia:start(),
+ Result = Fun(),
+ application:stop(mnesia),
+ application:set_env(mnesia, dir, SavedMnesiaDir),
+ application:set_env(mnesia, schema_location, SchemaLoc),
+ Result
+ end.
+
+check_rabbit_consistency(RemoteNode, RemoteVersion) ->
+ rabbit_misc:sequence_error(
+ [rabbit_version:check_version_consistency(
+ rabbit_misc:version(), RemoteVersion, "Rabbit",
+ fun rabbit_misc:version_minor_equivalent/2),
+ rabbit_feature_flags:check_node_compatibility(RemoteNode)]).
+
+%% This is fairly tricky. We want to know if the node is in the state
+%% that a `reset' would leave it in. We cannot simply check if the
+%% mnesia tables aren't there because restarted RAM nodes won't have
+%% tables while still being non-virgin. What we do instead is
+%% check if the mnesia directory is non-existent or empty, with the
+%% exception of certain files and directories, which can be there very early
+%% on node boot.
+is_virgin_node() ->
+ case rabbit_file:list_dir(dir()) of
+ {error, enoent} ->
+ true;
+ {ok, []} ->
+ true;
+ {ok, List0} ->
+ IgnoredFiles0 =
+ [rabbit_node_monitor:cluster_status_filename(),
+ rabbit_node_monitor:running_nodes_filename(),
+ rabbit_node_monitor:default_quorum_filename(),
+ rabbit_node_monitor:quorum_filename(),
+ rabbit_feature_flags:enabled_feature_flags_list_file()],
+ IgnoredFiles = [filename:basename(File) || File <- IgnoredFiles0],
+ rabbit_log:debug("Files and directories found in node's data directory: ~s, of them to be ignored: ~s",
+ [string:join(lists:usort(List0), ", "), string:join(lists:usort(IgnoredFiles), ", ")]),
+ List = List0 -- IgnoredFiles,
+ rabbit_log:debug("Files and directories found in node's data directory sans ignored ones: ~s", [string:join(lists:usort(List), ", ")]),
+ List =:= []
+ end.
+
+find_reachable_peer_to_cluster_with([]) ->
+ none;
+find_reachable_peer_to_cluster_with([Node | Nodes]) ->
+ Fail = fun (Fmt, Args) ->
+ rabbit_log:warning(
+ "Could not auto-cluster with node ~s: " ++ Fmt, [Node | Args]),
+ find_reachable_peer_to_cluster_with(Nodes)
+ end,
+ case remote_node_info(Node) of
+ {badrpc, _} = Reason ->
+ Fail("~p~n", [Reason]);
+ %% old delegate hash check
+ {_OTP, RMQ, Hash, _} when is_binary(Hash) ->
+ Fail("version ~s~n", [RMQ]);
+ {_OTP, _RMQ, _Protocol, {error, _} = E} ->
+ Fail("~p~n", [E]);
+ {OTP, RMQ, Protocol, _} ->
+ case check_consistency(Node, OTP, RMQ, Protocol) of
+ {error, _} -> Fail("versions ~p~n",
+ [{OTP, RMQ}]);
+ ok -> {ok, Node}
+ end
+ end.
+
+is_only_clustered_disc_node() ->
+ node_type() =:= disc andalso is_clustered() andalso
+ cluster_nodes(disc) =:= [node()].
+
+are_we_clustered_with(Node) ->
+ lists:member(Node, mnesia_lib:all_nodes()).
+
+me_in_nodes(Nodes) -> lists:member(node(), Nodes).
+
+nodes_incl_me(Nodes) -> lists:usort([node()|Nodes]).
+
+nodes_excl_me(Nodes) -> Nodes -- [node()].
+
+-spec e(any()) -> no_return().
+
+e(Tag) -> throw({error, {Tag, error_description(Tag)}}).
+
+error_description({invalid_cluster_node_names, BadNames}) ->
+ "In the 'cluster_nodes' configuration key, the following node names "
+ "are invalid: " ++ lists:flatten(io_lib:format("~p", [BadNames]));
+error_description({invalid_cluster_node_type, BadType}) ->
+ "In the 'cluster_nodes' configuration key, the node type is invalid "
+ "(expected 'disc' or 'ram'): " ++
+ lists:flatten(io_lib:format("~p", [BadType]));
+error_description(invalid_cluster_nodes_conf) ->
+ "The 'cluster_nodes' configuration key is invalid, it must be of the "
+ "form {[Nodes], Type}, where Nodes is a list of node names and "
+ "Type is either 'disc' or 'ram'";
+error_description(clustering_only_disc_node) ->
+ "You cannot cluster a node if it is the only disc node in its existing "
+ " cluster. If new nodes joined while this node was offline, use "
+ "'update_cluster_nodes' to add them manually.";
+error_description(resetting_only_disc_node) ->
+ "You cannot reset a node when it is the only disc node in a cluster. "
+ "Please convert another node of the cluster to a disc node first.";
+error_description(not_clustered) ->
+ "Non-clustered nodes can only be disc nodes.";
+error_description(no_online_cluster_nodes) ->
+ "Could not find any online cluster nodes. If the cluster has changed, "
+ "you can use the 'update_cluster_nodes' command.";
+error_description(inconsistent_cluster) ->
+ "The nodes provided do not have this node as part of the cluster.";
+error_description(not_a_cluster_node) ->
+ "The node selected is not in the cluster.";
+error_description(online_node_offline_flag) ->
+ "You set the --offline flag, which is used to remove nodes remotely from "
+ "offline nodes, but this node is online.";
+error_description(offline_node_no_offline_flag) ->
+ "You are trying to remove a node from an offline node. That is dangerous, "
+ "but can be done with the --offline flag. Please consult the manual "
+ "for rabbitmqctl for more information.";
+error_description(removing_node_from_offline_node) ->
+ "To remove a node remotely from an offline node, the node you are removing "
+ "from must be a disc node and all the other nodes must be offline.";
+error_description(no_running_cluster_nodes) ->
+ "You cannot leave a cluster if no online nodes are present.".
+
+format_inconsistent_cluster_message(Thinker, Dissident) ->
+ rabbit_misc:format("Node ~p thinks it's clustered "
+ "with node ~p, but ~p disagrees",
+ [Thinker, Dissident, Dissident]).
diff --git a/deps/rabbit/src/rabbit_mnesia_rename.erl b/deps/rabbit/src/rabbit_mnesia_rename.erl
new file mode 100644
index 0000000000..e0d88c0f5e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mnesia_rename.erl
@@ -0,0 +1,276 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mnesia_rename).
+-include("rabbit.hrl").
+
+-export([rename/2]).
+-export([maybe_finish/1]).
+
+-define(CONVERT_TABLES, [schema, rabbit_durable_queue]).
+
+%% Supports renaming the nodes in the Mnesia database. In order to do
+%% this, we take a backup of the database, traverse the backup
+%% changing node names and pids as we go, then restore it.
+%%
+%% That's enough for a standalone node; for clusters the story is more
+%% complex. We can take pairs of nodes From and To, but backing up and
+%% restoring the database changes schema cookies, so if we just do
+%% this on all nodes the cluster will refuse to re-form with
+%% "Incompatible schema cookies.". Therefore we do something similar
+%% to what we do for upgrades - the first node in the cluster to
+%% restart becomes the authority, and other nodes wipe their own
+%% Mnesia state and rejoin. They also need to tell Mnesia the old node
+%% is not coming back.
+%%
+%% If we are renaming nodes one at a time then the running cluster
+%% might not be aware that a rename has taken place, so after we wipe
+%% and rejoin we then update any tables (in practice just
+%% rabbit_durable_queue) which should be aware that we have changed.
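+%%
+%% Illustrative call (node names are hypothetical), renaming a stopped
+%% node from 'rabbit@oldhost' to 'rabbit@newhost':
+%%
+%%   ok = rabbit_mnesia_rename:rename('rabbit@oldhost',
+%%                                    [{'rabbit@oldhost', 'rabbit@newhost'}]).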
+
+%%----------------------------------------------------------------------------
+
+-spec rename(node(), [{node(), node()}]) -> 'ok'.
+
+rename(Node, NodeMapList) ->
+ try
+ %% Check everything is correct and figure out what we are
+ %% changing from and to.
+ {FromNode, ToNode, NodeMap} = prepare(Node, NodeMapList),
+
+ %% We backup and restore Mnesia even if other nodes are
+ %% running at the time, and defer the final decision about
+ %% whether to use our mutated copy or rejoin the cluster until
+ %% we restart. That means we might be mutating our copy of the
+ %% database while the cluster is running. *Do not* contact the
+ %% cluster while this is happening; we are likely to get
+ %% confused.
+ application:set_env(kernel, dist_auto_connect, never),
+
+ %% Take a copy we can restore from if we abandon the
+ %% rename. We don't restore from the "backup" since restoring
+ %% that changes schema cookies and might stop us rejoining the
+ %% cluster.
+ ok = rabbit_mnesia:copy_db(mnesia_copy_dir()),
+
+ %% And make the actual changes
+ become(FromNode),
+ take_backup(before_backup_name()),
+ convert_backup(NodeMap, before_backup_name(), after_backup_name()),
+ ok = rabbit_file:write_term_file(rename_config_name(),
+ [{FromNode, ToNode}]),
+ convert_config_files(NodeMap),
+ become(ToNode),
+ restore_backup(after_backup_name()),
+ ok
+ after
+ stop_mnesia()
+ end.
+
+prepare(Node, NodeMapList) ->
+ %% If we have a previous rename and haven't started since, give up.
+ case rabbit_file:is_dir(dir()) of
+ true -> exit({rename_in_progress,
+ "Restart node under old name to roll back"});
+ false -> ok = rabbit_file:ensure_dir(mnesia_copy_dir())
+ end,
+
+ %% Check we don't have two nodes mapped to the same node
+ {FromNodes, ToNodes} = lists:unzip(NodeMapList),
+ case length(FromNodes) - length(lists:usort(ToNodes)) of
+ 0 -> ok;
+ _ -> exit({duplicate_node, ToNodes})
+ end,
+
+ %% Figure out which node we are before and after the change
+ FromNode = case [From || {From, To} <- NodeMapList,
+ To =:= Node] of
+ [N] -> N;
+ [] -> Node
+ end,
+ NodeMap = dict:from_list(NodeMapList),
+ ToNode = case dict:find(FromNode, NodeMap) of
+ {ok, N2} -> N2;
+ error -> FromNode
+ end,
+
+ %% Check that we are in the cluster, all old nodes are in the
+ %% cluster, and no new nodes are.
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ case {FromNodes -- Nodes, ToNodes -- (ToNodes -- Nodes),
+ lists:member(Node, Nodes ++ ToNodes)} of
+ {[], [], true} -> ok;
+ {[], [], false} -> exit({i_am_not_involved, Node});
+ {F, [], _} -> exit({nodes_not_in_cluster, F});
+ {_, T, _} -> exit({nodes_already_in_cluster, T})
+ end,
+ {FromNode, ToNode, NodeMap}.
+
+take_backup(Backup) ->
+ start_mnesia(),
+ %% We back up only local tables: in particular, this excludes the
+ %% connection tracking tables which have no local replica.
+ LocalTables = mnesia:system_info(local_tables),
+ {ok, Name, _Nodes} = mnesia:activate_checkpoint([
+ {max, LocalTables}
+ ]),
+ ok = mnesia:backup_checkpoint(Name, Backup),
+ stop_mnesia().
+
+restore_backup(Backup) ->
+ ok = mnesia:install_fallback(Backup, [{scope, local}]),
+ start_mnesia(),
+ stop_mnesia(),
+ rabbit_mnesia:force_load_next_boot().
+
+-spec maybe_finish([node()]) -> 'ok'.
+
+maybe_finish(AllNodes) ->
+ case rabbit_file:read_term_file(rename_config_name()) of
+ {ok, [{FromNode, ToNode}]} -> finish(FromNode, ToNode, AllNodes);
+ _ -> ok
+ end.
+
+finish(FromNode, ToNode, AllNodes) ->
+ case node() of
+ ToNode ->
+ case rabbit_upgrade:nodes_running(AllNodes) of
+ [] -> finish_primary(FromNode, ToNode);
+ _ -> finish_secondary(FromNode, ToNode, AllNodes)
+ end;
+ FromNode ->
+ rabbit_log:info(
+ "Abandoning rename from ~s to ~s since we are still ~s~n",
+ [FromNode, ToNode, FromNode]),
+ [{ok, _} = file:copy(backup_of_conf(F), F) || F <- config_files()],
+ ok = rabbit_file:recursive_delete([rabbit_mnesia:dir()]),
+ ok = rabbit_file:recursive_copy(
+ mnesia_copy_dir(), rabbit_mnesia:dir()),
+ delete_rename_files();
+ _ ->
+ %% Boot will almost certainly fail but we might as
+ %% well just log this
+ rabbit_log:info(
+ "Rename attempted from ~s to ~s but we are ~s - ignoring.~n",
+ [FromNode, ToNode, node()])
+ end.
+
+finish_primary(FromNode, ToNode) ->
+ rabbit_log:info("Restarting as primary after rename from ~s to ~s~n",
+ [FromNode, ToNode]),
+ delete_rename_files(),
+ ok.
+
+finish_secondary(FromNode, ToNode, AllNodes) ->
+ rabbit_log:info("Restarting as secondary after rename from ~s to ~s~n",
+ [FromNode, ToNode]),
+ rabbit_upgrade:secondary_upgrade(AllNodes),
+ rename_in_running_mnesia(FromNode, ToNode),
+ delete_rename_files(),
+ ok.
+
+dir() -> rabbit_mnesia:dir() ++ "-rename".
+before_backup_name() -> dir() ++ "/backup-before".
+after_backup_name() -> dir() ++ "/backup-after".
+rename_config_name() -> dir() ++ "/pending.config".
+mnesia_copy_dir() -> dir() ++ "/mnesia-copy".
+
+delete_rename_files() -> ok = rabbit_file:recursive_delete([dir()]).
+
+start_mnesia() -> rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ rabbit_table:force_load(),
+ rabbit_table:wait_for_replicated(_Retry = false).
+stop_mnesia() -> stopped = mnesia:stop().
+
+convert_backup(NodeMap, FromBackup, ToBackup) ->
+ mnesia:traverse_backup(
+ FromBackup, ToBackup,
+ fun
+ (Row, Acc) ->
+ case lists:member(element(1, Row), ?CONVERT_TABLES) of
+ true -> {[update_term(NodeMap, Row)], Acc};
+ false -> {[Row], Acc}
+ end
+ end, switched).
+
+config_files() ->
+ [rabbit_node_monitor:running_nodes_filename(),
+ rabbit_node_monitor:cluster_status_filename()].
+
+backup_of_conf(Path) ->
+ filename:join([dir(), filename:basename(Path)]).
+
+convert_config_files(NodeMap) ->
+ [convert_config_file(NodeMap, Path) || Path <- config_files()].
+
+convert_config_file(NodeMap, Path) ->
+ {ok, Term} = rabbit_file:read_term_file(Path),
+ {ok, _} = file:copy(Path, backup_of_conf(Path)),
+ ok = rabbit_file:write_term_file(Path, update_term(NodeMap, Term)).
+
+lookup_node(OldNode, NodeMap) ->
+ case dict:find(OldNode, NodeMap) of
+ {ok, NewNode} -> NewNode;
+ error -> OldNode
+ end.
+
+mini_map(FromNode, ToNode) -> dict:from_list([{FromNode, ToNode}]).
+
+update_term(NodeMap, L) when is_list(L) ->
+ [update_term(NodeMap, I) || I <- L];
+update_term(NodeMap, T) when is_tuple(T) ->
+ list_to_tuple(update_term(NodeMap, tuple_to_list(T)));
+update_term(NodeMap, Node) when is_atom(Node) ->
+ lookup_node(Node, NodeMap);
+update_term(NodeMap, Pid) when is_pid(Pid) ->
+ rabbit_misc:pid_change_node(Pid, lookup_node(node(Pid), NodeMap));
+update_term(_NodeMap, Term) ->
+ Term.
+
+rename_in_running_mnesia(FromNode, ToNode) ->
+ All = rabbit_mnesia:cluster_nodes(all),
+ Running = rabbit_nodes:all_running(),
+ case {lists:member(FromNode, Running), lists:member(ToNode, All)} of
+ {false, true} -> ok;
+ {true, _} -> exit({old_node_running, FromNode});
+ {_, false} -> exit({new_node_not_in_cluster, ToNode})
+ end,
+ {atomic, ok} = mnesia:del_table_copy(schema, FromNode),
+ Map = mini_map(FromNode, ToNode),
+ {atomic, _} = transform_table(rabbit_durable_queue, Map),
+ ok.
+
+transform_table(Table, Map) ->
+ mnesia:sync_transaction(
+ fun () ->
+ mnesia:lock({table, Table}, write),
+ transform_table(Table, Map, mnesia:first(Table))
+ end).
+
+transform_table(_Table, _Map, '$end_of_table') ->
+ ok;
+transform_table(Table, Map, Key) ->
+ [Term] = mnesia:read(Table, Key, write),
+ ok = mnesia:write(Table, update_term(Map, Term), write),
+ transform_table(Table, Map, mnesia:next(Table, Key)).
+
+become(BecomeNode) ->
+ error_logger:tty(false),
+ case net_adm:ping(BecomeNode) of
+ pong -> exit({node_running, BecomeNode});
+ pang -> ok = net_kernel:stop(),
+ io:format(" * Impersonating node: ~s...", [BecomeNode]),
+ {ok, _} = start_distribution(BecomeNode),
+ io:format(" done~n", []),
+ Dir = mnesia:system_info(directory),
+ io:format(" * Mnesia directory : ~s~n", [Dir])
+ end.
+
+start_distribution(Name) ->
+ rabbit_nodes:ensure_epmd(),
+ NameType = rabbit_nodes_common:name_type(Name),
+ net_kernel:start([Name, NameType]).
diff --git a/deps/rabbit/src/rabbit_msg_file.erl b/deps/rabbit/src/rabbit_msg_file.erl
new file mode 100644
index 0000000000..1a24f690a0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_msg_file.erl
@@ -0,0 +1,114 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_msg_file).
+
+-export([append/3, read/2, scan/4]).
+
+%%----------------------------------------------------------------------------
+
+-include("rabbit_msg_store.hrl").
+
+-define(INTEGER_SIZE_BYTES, 8).
+-define(INTEGER_SIZE_BITS, (8 * ?INTEGER_SIZE_BYTES)).
+-define(WRITE_OK_SIZE_BITS, 8).
+-define(WRITE_OK_MARKER, 255).
+-define(FILE_PACKING_ADJUSTMENT, (1 + ?INTEGER_SIZE_BYTES)).
+-define(MSG_ID_SIZE_BYTES, 16).
+-define(MSG_ID_SIZE_BITS, (8 * ?MSG_ID_SIZE_BYTES)).
+-define(SCAN_BLOCK_SIZE, 4194304). %% 4MB
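+
+%% On-disk framing used by append/3, read/2 and scan/4 below (a sketch
+%% derived from those functions): each entry is laid out as
+%%
+%%   <<Size:64, MsgId:16/binary, MsgBody/binary, ?WRITE_OK_MARKER:8>>
+%%
+%% where MsgBody is the term_to_binary-encoded message and
+%% Size = byte_size(MsgId) + byte_size(MsgBody), so a whole entry occupies
+%% Size + ?FILE_PACKING_ADJUSTMENT (9) bytes.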
+
+%%----------------------------------------------------------------------------
+
+-type io_device() :: any().
+-type position() :: non_neg_integer().
+-type msg_size() :: non_neg_integer().
+-type file_size() :: non_neg_integer().
+-type message_accumulator(A) ::
+ fun (({rabbit_types:msg_id(), msg_size(), position(), binary()}, A) ->
+ A).
+
+%%----------------------------------------------------------------------------
+
+-spec append(io_device(), rabbit_types:msg_id(), msg()) ->
+ rabbit_types:ok_or_error2(msg_size(), any()).
+
+append(FileHdl, MsgId, MsgBody)
+ when is_binary(MsgId) andalso size(MsgId) =:= ?MSG_ID_SIZE_BYTES ->
+ MsgBodyBin = term_to_binary(MsgBody),
+ MsgBodyBinSize = size(MsgBodyBin),
+ Size = MsgBodyBinSize + ?MSG_ID_SIZE_BYTES,
+ case file_handle_cache:append(FileHdl,
+ <<Size:?INTEGER_SIZE_BITS,
+ MsgId:?MSG_ID_SIZE_BYTES/binary,
+ MsgBodyBin:MsgBodyBinSize/binary,
+ ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>) of
+ ok -> {ok, Size + ?FILE_PACKING_ADJUSTMENT};
+ KO -> KO
+ end.
+
+-spec read(io_device(), msg_size()) ->
+ rabbit_types:ok_or_error2({rabbit_types:msg_id(), msg()},
+ any()).
+
+read(FileHdl, TotalSize) ->
+ Size = TotalSize - ?FILE_PACKING_ADJUSTMENT,
+ BodyBinSize = Size - ?MSG_ID_SIZE_BYTES,
+ case file_handle_cache:read(FileHdl, TotalSize) of
+ {ok, <<Size:?INTEGER_SIZE_BITS,
+ MsgId:?MSG_ID_SIZE_BYTES/binary,
+ MsgBodyBin:BodyBinSize/binary,
+ ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>} ->
+ {ok, {MsgId, binary_to_term(MsgBodyBin)}};
+ KO -> KO
+ end.
+
+-spec scan(io_device(), file_size(), message_accumulator(A), A) ->
+ {'ok', A, position()}.
+
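+%% Illustrative use (a sketch; Hdl is a file_handle_cache handle and
+%% FileSize the file size in bytes): collect all message ids in a file
+%%
+%%   {ok, MsgIds, _ScanOffset} =
+%%       rabbit_msg_file:scan(Hdl, FileSize,
+%%                            fun ({MsgId, _Size, _Offset, _Msg}, Acc) ->
+%%                                    [MsgId | Acc]
+%%                            end, []).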
+scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 ->
+ scan(FileHdl, FileSize, <<>>, 0, 0, Fun, Acc).
+
+scan(_FileHdl, FileSize, _Data, FileSize, ScanOffset, _Fun, Acc) ->
+ {ok, Acc, ScanOffset};
+scan(FileHdl, FileSize, Data, ReadOffset, ScanOffset, Fun, Acc) ->
+ Read = lists:min([?SCAN_BLOCK_SIZE, (FileSize - ReadOffset)]),
+ case file_handle_cache:read(FileHdl, Read) of
+ {ok, Data1} ->
+ {Data2, Acc1, ScanOffset1} =
+ scanner(<<Data/binary, Data1/binary>>, ScanOffset, Fun, Acc),
+ ReadOffset1 = ReadOffset + size(Data1),
+ scan(FileHdl, FileSize, Data2, ReadOffset1, ScanOffset1, Fun, Acc1);
+ _KO ->
+ {ok, Acc, ScanOffset}
+ end.
+
+scanner(<<>>, Offset, _Fun, Acc) ->
+ {<<>>, Acc, Offset};
+scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) ->
+ {<<>>, Acc, Offset}; %% Nothing to do other than stop.
+scanner(<<Size:?INTEGER_SIZE_BITS, MsgIdAndMsg:Size/binary,
+ WriteMarker:?WRITE_OK_SIZE_BITS, Rest/binary>>, Offset, Fun, Acc) ->
+ TotalSize = Size + ?FILE_PACKING_ADJUSTMENT,
+ case WriteMarker of
+ ?WRITE_OK_MARKER ->
+ %% Here we take option 5 from
+ %% https://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in
+ %% which we read the MsgId as a number, and then convert it
+ %% back to a binary in order to work around bugs in
+ %% Erlang's GC.
+ <<MsgIdNum:?MSG_ID_SIZE_BITS, Msg/binary>> =
+ <<MsgIdAndMsg:Size/binary>>,
+ <<MsgId:?MSG_ID_SIZE_BYTES/binary>> =
+ <<MsgIdNum:?MSG_ID_SIZE_BITS>>,
+ scanner(Rest, Offset + TotalSize, Fun,
+ Fun({MsgId, TotalSize, Offset, Msg}, Acc));
+ _ ->
+ scanner(Rest, Offset + TotalSize, Fun, Acc)
+ end;
+scanner(Data, Offset, _Fun, Acc) ->
+ {Data, Acc, Offset}.
diff --git a/deps/rabbit/src/rabbit_msg_record.erl b/deps/rabbit/src/rabbit_msg_record.erl
new file mode 100644
index 0000000000..3ebe14cb9f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_msg_record.erl
@@ -0,0 +1,400 @@
+-module(rabbit_msg_record).
+
+-export([
+ init/1,
+ to_iodata/1,
+ from_amqp091/2,
+ to_amqp091/1,
+ add_message_annotations/2,
+ message_annotation/2,
+ message_annotation/3
+ ]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+-type maybe(T) :: T | undefined.
+-type amqp10_data() :: #'v1_0.data'{} |
+ [#'v1_0.amqp_sequence'{} | #'v1_0.data'{}] |
+ #'v1_0.amqp_value'{}.
+-record(msg,
+ {
+ % header :: maybe(#'v1_0.header'{}),
+ % delivery_annotations :: maybe(#'v1_0.delivery_annotations'{}),
+ message_annotations :: maybe(#'v1_0.message_annotations'{}),
+ properties :: maybe(#'v1_0.properties'{}),
+ application_properties :: maybe(#'v1_0.application_properties'{}),
+ data :: maybe(amqp10_data())
+ % footer :: maybe(#'v1_0.footer'{})
+ }).
+
+%% holds static or rarely changing fields
+-record(cfg, {}).
+-record(?MODULE, {cfg :: #cfg{},
+ msg :: #msg{},
+ %% holds a list of modifications to various sections
+ changes = [] :: list()}).
+
+-opaque state() :: #?MODULE{}.
+
+-export_type([
+ state/0
+ ]).
+
+%% This module acts as a wrapper / converter for the internal binary storage
+%% format (AMQP 1.0) and any format it needs to be converted to / from.
+%% Efficiency is key. No unnecessary allocations or work should be done until
+%% it is absolutely needed.
+
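+%% Illustrative round trip (a sketch using a hypothetical payload):
+%%
+%%   State  = rabbit_msg_record:from_amqp091(#'P_basic'{}, <<"payload">>),
+%%   IoData = rabbit_msg_record:to_iodata(State),
+%%   State1 = rabbit_msg_record:init(iolist_to_binary(IoData)),
+%%   {_Props, _Payload} = rabbit_msg_record:to_amqp091(State1).
+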
+%% init from an AMQP 1.0 encoded binary
+-spec init(binary()) -> state().
+init(Bin) when is_binary(Bin) ->
+ %% TODO: delay parsing until needed
+ {MA, P, AP, D} = decode(amqp10_framing:decode_bin(Bin),
+ {undefined, undefined, undefined, undefined}),
+ #?MODULE{cfg = #cfg{},
+ msg = #msg{properties = P,
+ application_properties = AP,
+ message_annotations = MA,
+ data = D}}.
+
+decode([], Acc) ->
+ Acc;
+decode([#'v1_0.message_annotations'{} = MA | Rem], {_, P, AP, D}) ->
+ decode(Rem, {MA, P, AP, D});
+decode([#'v1_0.properties'{} = P | Rem], {MA, _, AP, D}) ->
+ decode(Rem, {MA, P, AP, D});
+decode([#'v1_0.application_properties'{} = AP | Rem], {MA, P, _, D}) ->
+ decode(Rem, {MA, P, AP, D});
+decode([#'v1_0.data'{} = D | Rem], {MA, P, AP, _}) ->
+ decode(Rem, {MA, P, AP, D}).
+
+amqp10_properties_empty(#'v1_0.properties'{message_id = undefined,
+ user_id = undefined,
+ to = undefined,
+ % subject = wrap(utf8, RKey),
+ reply_to = undefined,
+ correlation_id = undefined,
+ content_type = undefined,
+ content_encoding = undefined,
+ creation_time = undefined}) ->
+ true;
+amqp10_properties_empty(_) ->
+ false.
+
+%% to realise the final binary data representation
+-spec to_iodata(state()) -> iodata().
+to_iodata(#?MODULE{msg = #msg{properties = P,
+ application_properties = AP,
+ message_annotations = MA,
+ data = Data}}) ->
+ [
+ case MA of
+ #'v1_0.message_annotations'{content = []} ->
+ <<>>;
+ _ ->
+ amqp10_framing:encode_bin(MA)
+ end,
+ case amqp10_properties_empty(P) of
+ true -> <<>>;
+ false ->
+ amqp10_framing:encode_bin(P)
+ end,
+ case AP of
+ #'v1_0.application_properties'{content = []} ->
+ <<>>;
+ _ ->
+ amqp10_framing:encode_bin(AP)
+ end,
+ amqp10_framing:encode_bin(Data)
+ ].
+
+%% TODO: refine type spec here
+-spec add_message_annotations(#{binary() => {atom(), term()}}, state()) ->
+ state().
+add_message_annotations(Anns,
+ #?MODULE{msg =
+ #msg{message_annotations = MA0} = Msg} = State) ->
+ Content = maps:fold(
+ fun (K, {T, V}, Acc) ->
+ map_add(symbol, K, T, V, Acc)
+ end,
+ case MA0 of
+ undefined -> [];
+ #'v1_0.message_annotations'{content = C} -> C
+ end,
+ Anns),
+
+ State#?MODULE{msg =
+ Msg#msg{message_annotations =
+ #'v1_0.message_annotations'{content = Content}}}.
+
+%% TODO: refine
+-type amqp10_term() :: {atom(), term()}.
+
+-spec message_annotation(binary(), state()) -> undefined | amqp10_term().
+message_annotation(Key, State) ->
+ message_annotation(Key, State, undefined).
+
+-spec message_annotation(binary(), state(), undefined | amqp10_term()) ->
+ undefined | amqp10_term().
+message_annotation(_Key, #?MODULE{msg = #msg{message_annotations = undefined}},
+ Default) ->
+ Default;
+message_annotation(Key,
+ #?MODULE{msg =
+ #msg{message_annotations =
+ #'v1_0.message_annotations'{content = Content}}},
+ Default)
+ when is_binary(Key) ->
+ case lists:search(fun ({{symbol, K}, _}) -> K == Key end, Content) of
+ {value, {_K, V}} ->
+ V;
+ false ->
+ Default
+ end.
+
+
+%% Build the internal (AMQP 1.0 based) representation from AMQP 0-9-1
+%% basic properties and a payload; this is the conversion counterpart of
+%% to_amqp091/1.
+-spec from_amqp091(#'P_basic'{}, iodata()) -> state().
+from_amqp091(#'P_basic'{message_id = MsgId,
+ expiration = Expiration,
+ delivery_mode = DelMode,
+ headers = Headers,
+ user_id = UserId,
+ reply_to = ReplyTo,
+ type = Type,
+ priority = Priority,
+ app_id = AppId,
+ correlation_id = CorrId,
+ content_type = ContentType,
+ content_encoding = ContentEncoding,
+ timestamp = Timestamp
+ }, Data) ->
+ %% TODO: support parsing properties bin directly?
+ ConvertedTs = case Timestamp of
+ undefined ->
+ undefined;
+ _ ->
+ Timestamp * 1000
+ end,
+ P = #'v1_0.properties'{message_id = wrap(utf8, MsgId),
+ user_id = wrap(binary, UserId),
+ to = undefined,
+ % subject = wrap(utf8, RKey),
+ reply_to = wrap(utf8, ReplyTo),
+ correlation_id = wrap(utf8, CorrId),
+ content_type = wrap(symbol, ContentType),
+ content_encoding = wrap(symbol, ContentEncoding),
+ creation_time = wrap(timestamp, ConvertedTs)},
+
+ APC0 = [{wrap(utf8, K), from_091(T, V)} || {K, T, V}
+ <- case Headers of
+ undefined -> [];
+ _ -> Headers
+ end],
+ %% properties that do not map directly to AMQP 1.0 properties are stored
+ %% in application properties
+ APC = map_add(utf8, <<"x-basic-type">>, utf8, Type,
+ map_add(utf8, <<"x-basic-app-id">>, utf8, AppId, APC0)),
+
+ MAC = map_add(symbol, <<"x-basic-priority">>, ubyte, Priority,
+ map_add(symbol, <<"x-basic-delivery-mode">>, ubyte, DelMode,
+ map_add(symbol, <<"x-basic-expiration">>, utf8, Expiration, []))),
+
+ AP = #'v1_0.application_properties'{content = APC},
+ MA = #'v1_0.message_annotations'{content = MAC},
+ #?MODULE{cfg = #cfg{},
+ msg = #msg{properties = P,
+ application_properties = AP,
+ message_annotations = MA,
+ data = #'v1_0.data'{content = Data}}}.
+
+map_add(_T, _Key, _Type, undefined, Acc) ->
+ Acc;
+map_add(KeyType, Key, Type, Value, Acc) ->
+ [{wrap(KeyType, Key), wrap(Type, Value)} | Acc].
+
+-spec to_amqp091(state()) -> {#'P_basic'{}, iodata()}.
+to_amqp091(#?MODULE{msg = #msg{properties = P,
+ application_properties = APR,
+ message_annotations = MAR,
+ data = #'v1_0.data'{content = Payload}}}) ->
+ #'v1_0.properties'{message_id = MsgId,
+ user_id = UserId,
+ reply_to = ReplyTo0,
+ correlation_id = CorrId,
+ content_type = ContentType,
+ content_encoding = ContentEncoding,
+ creation_time = Timestamp} = case P of
+ undefined ->
+ #'v1_0.properties'{};
+ _ ->
+ P
+ end,
+
+ AP0 = case APR of
+ #'v1_0.application_properties'{content = AC} -> AC;
+ _ -> []
+ end,
+ MA0 = case MAR of
+ #'v1_0.message_annotations'{content = MC} -> MC;
+ _ -> []
+ end,
+
+ {Type, AP1} = amqp10_map_get(utf8(<<"x-basic-type">>), AP0),
+ {AppId, AP} = amqp10_map_get(utf8(<<"x-basic-app-id">>), AP1),
+
+ {Priority, MA1} = amqp10_map_get(symbol(<<"x-basic-priority">>), MA0),
+ {DelMode, MA2} = amqp10_map_get(symbol(<<"x-basic-delivery-mode">>), MA1),
+ {Expiration, _MA} = amqp10_map_get(symbol(<<"x-basic-expiration">>), MA2),
+
+ Headers0 = [to_091(unwrap(K), V) || {K, V} <- AP],
+ {Headers1, MsgId091} = message_id(MsgId, <<"x-message-id-type">>, Headers0),
+ {Headers, CorrId091} = message_id(CorrId, <<"x-correlation-id-type">>, Headers1),
+
+ BP = #'P_basic'{message_id = MsgId091,
+ delivery_mode = DelMode,
+ expiration = Expiration,
+ user_id = unwrap(UserId),
+ headers = case Headers of
+ [] -> undefined;
+ _ -> Headers
+ end,
+ reply_to = unwrap(ReplyTo0),
+ type = Type,
+ app_id = AppId,
+ priority = Priority,
+ correlation_id = CorrId091,
+ content_type = unwrap(ContentType),
+ content_encoding = unwrap(ContentEncoding),
+ timestamp = case unwrap(Timestamp) of
+ undefined ->
+ undefined;
+ Ts ->
+ Ts div 1000
+ end
+ },
+ {BP, Payload}.
+
+%%% Internal
+
+amqp10_map_get(K, AP0) ->
+ case lists:keytake(K, 1, AP0) of
+ false ->
+ {undefined, AP0};
+ {value, {_, V}, AP} ->
+ {unwrap(V), AP}
+ end.
+
+wrap(_Type, undefined) ->
+ undefined;
+wrap(Type, Val) ->
+ {Type, Val}.
+
+unwrap(undefined) ->
+ undefined;
+unwrap({_Type, V}) ->
+ V.
+
+% symbol_for(#'v1_0.properties'{}) ->
+% {symbol, <<"amqp:properties:list">>};
+
+% number_for(#'v1_0.properties'{}) ->
+% {ulong, 115};
+% encode(Frame = #'v1_0.properties'{}) ->
+% amqp10_framing:encode_described(list, 115, Frame);
+
+% encode_described(list, CodeNumber, Frame) ->
+% {described, {ulong, CodeNumber},
+% {list, lists:map(fun encode/1, tl(tuple_to_list(Frame)))}};
+
+% -spec generate(amqp10_type()) -> iolist().
+% generate({described, Descriptor, Value}) ->
+% DescBin = generate(Descriptor),
+% ValueBin = generate(Value),
+% [ ?DESCRIBED_BIN, DescBin, ValueBin ].
+
+to_091(Key, {utf8, V}) when is_binary(V) -> {Key, longstr, V};
+to_091(Key, {long, V}) -> {Key, long, V};
+to_091(Key, {byte, V}) -> {Key, byte, V};
+to_091(Key, {ubyte, V}) -> {Key, unsignedbyte, V};
+to_091(Key, {short, V}) -> {Key, short, V};
+to_091(Key, {ushort, V}) -> {Key, unsignedshort, V};
+to_091(Key, {uint, V}) -> {Key, unsignedint, V};
+to_091(Key, {int, V}) -> {Key, signedint, V};
+to_091(Key, {double, V}) -> {Key, double, V};
+to_091(Key, {float, V}) -> {Key, float, V};
+%% NB: header values can never be shortstr!
+to_091(Key, {timestamp, V}) -> {Key, timestamp, V div 1000};
+to_091(Key, {binary, V}) -> {Key, binary, V};
+to_091(Key, {boolean, V}) -> {Key, bool, V};
+to_091(Key, true) -> {Key, bool, true};
+to_091(Key, false) -> {Key, bool, false}.
+
+from_091(longstr, V) when is_binary(V) -> {utf8, V};
+from_091(long, V) -> {long, V};
+from_091(unsignedbyte, V) -> {ubyte, V};
+from_091(short, V) -> {short, V};
+from_091(unsignedshort, V) -> {ushort, V};
+from_091(unsignedint, V) -> {uint, V};
+from_091(signedint, V) -> {int, V};
+from_091(double, V) -> {double, V};
+from_091(float, V) -> {float, V};
+from_091(bool, V) -> {boolean, V};
+from_091(binary, V) -> {binary, V};
+from_091(timestamp, V) -> {timestamp, V * 1000};
+from_091(byte, V) -> {byte, V}.
+
+% convert_header(signedint, V) -> [$I, <<V:32/signed>>];
+% convert_header(decimal, V) -> {Before, After} = V,
+% [$D, Before, <<After:32>>];
+% convert_header(timestamp, V) -> [$T, <<V:64>>];
+% % convert_header(table, V) -> [$F | table_to_binary(V)];
+% % convert_header(array, V) -> [$A | array_to_binary(V)];
+% convert_header(byte, V) -> [$b, <<V:8/signed>>];
+% convert_header(double, V) -> [$d, <<V:64/float>>];
+% convert_header(float, V) -> [$f, <<V:32/float>>];
+% convert_header(short, V) -> [$s, <<V:16/signed>>];
+% convert_header(binary, V) -> [$x | long_string_to_binary(V)];
+% convert_header(unsignedbyte, V) -> [$B, <<V:8/unsigned>>];
+% convert_header(unsignedshort, V) -> [$u, <<V:16/unsigned>>];
+% convert_header(unsignedint, V) -> [$i, <<V:32/unsigned>>];
+% convert_header(void, _V) -> [$V].
+
+utf8(T) -> {utf8, T}.
+symbol(T) -> {symbol, T}.
+
+message_id({uuid, UUID}, HKey, H0) ->
+ H = [{HKey, longstr, <<"uuid">>} | H0],
+ {H, rabbit_data_coercion:to_binary(rabbit_guid:to_string(UUID))};
+message_id({ulong, N}, HKey, H0) ->
+ H = [{HKey, longstr, <<"ulong">>} | H0],
+ {H, erlang:integer_to_binary(N)};
+message_id({binary, B}, HKey, H0) ->
+ E = base64:encode(B),
+ case byte_size(E) > 256 of
+ true ->
+ K = binary:replace(HKey, <<"-type">>, <<>>),
+ {[{K, longstr, B} | H0], undefined};
+ false ->
+ H = [{HKey, longstr, <<"binary">>} | H0],
+ {H, E}
+ end;
+message_id({utf8, S}, HKey, H0) ->
+ case byte_size(S) > 256 of
+ true ->
+ K = binary:replace(HKey, <<"-type">>, <<>>),
+ {[{K, longstr, S} | H0], undefined};
+ false ->
+ {H0, S}
+ end;
+message_id(MsgId, _, H) ->
+ {H, unwrap(MsgId)}.
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
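+
+%% A minimal illustrative check (a sketch; the test name is chosen here
+%% purely for illustration): to_091/2 undoes from_091/2 for simple
+%% header values. Timestamps are deliberately excluded because the
+%% millisecond-to-second conversion is lossy.
+header_conversion_roundtrip_test() ->
+    ?assertEqual({<<"k">>, longstr, <<"v">>},
+                 to_091(<<"k">>, from_091(longstr, <<"v">>))),
+    ?assertEqual({<<"n">>, signedint, 42},
+                 to_091(<<"n">>, from_091(signedint, 42))).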
+-endif.
diff --git a/deps/rabbit/src/rabbit_msg_store.erl b/deps/rabbit/src/rabbit_msg_store.erl
new file mode 100644
index 0000000000..4851e56248
--- /dev/null
+++ b/deps/rabbit/src/rabbit_msg_store.erl
@@ -0,0 +1,2245 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_msg_store).
+
+-behaviour(gen_server2).
+
+-export([start_link/4, start_global_store_link/4, successfully_recovered_state/1,
+ client_init/4, client_terminate/1, client_delete_and_terminate/1,
+ client_ref/1, close_all_indicated/1,
+ write/3, write_flow/3, read/2, contains/2, remove/2]).
+
+-export([set_maximum_since_use/2, combine_files/3,
+ delete_file/2]). %% internal
+
+-export([scan_file_for_valid_messages/1]). %% salvage tool
+
+-export([transform_dir/3, force_recovery/2]). %% upgrade
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, prioritise_call/4, prioritise_cast/3,
+ prioritise_info/3, format_message_queue/2]).
+
+%%----------------------------------------------------------------------------
+
+-include("rabbit_msg_store.hrl").
+
+-define(SYNC_INTERVAL, 25). %% milliseconds
+-define(CLEAN_FILENAME, "clean.dot").
+-define(FILE_SUMMARY_FILENAME, "file_summary.ets").
+-define(TRANSFORM_TMP, "transform_tmp").
+
+-define(BINARY_MODE, [raw, binary]).
+-define(READ_MODE, [read]).
+-define(READ_AHEAD_MODE, [read_ahead | ?READ_MODE]).
+-define(WRITE_MODE, [write]).
+
+-define(FILE_EXTENSION, ".rdq").
+-define(FILE_EXTENSION_TMP, ".rdt").
+
+-define(HANDLE_CACHE_BUFFER_SIZE, 1048576). %% 1MB
+
+ %% i.e. two pairs, so GC does not go idle when busy
+-define(MAXIMUM_SIMULTANEOUS_GC_FILES, 4).
+
+%%----------------------------------------------------------------------------
+
+-record(msstate,
+ {
+ %% store directory
+ dir,
+ %% the module for index ops,
+ %% rabbit_msg_store_ets_index by default
+ index_module,
+ %% where are messages?
+ index_state,
+ %% current file name as number
+ current_file,
+ %% current file handle since the last fsync?
+ current_file_handle,
+ %% file handle cache
+ file_handle_cache,
+ %% TRef for our interval timer
+ sync_timer_ref,
+ %% sum of valid data in all files
+ sum_valid_data,
+ %% sum of file sizes
+ sum_file_size,
+ %% things to do once GC completes
+ pending_gc_completion,
+ %% pid of our GC
+ gc_pid,
+ %% tid of the shared file handles table
+ file_handles_ets,
+ %% tid of the file summary table
+ file_summary_ets,
+ %% tid of current file cache table
+ cur_file_cache_ets,
+ %% tid of writes/removes in flight
+ flying_ets,
+ %% set of dying clients
+ dying_clients,
+ %% map of references of all registered clients
+ %% to callbacks
+ clients,
+ %% boolean: did we recover state?
+ successfully_recovered,
+ %% how big are our files allowed to get?
+ file_size_limit,
+ %% client ref to synced messages mapping
+ cref_to_msg_ids,
+ %% See CREDIT_DISC_BOUND in rabbit.hrl
+ credit_disc_bound
+ }).
+
+-record(client_msstate,
+ { server,
+ client_ref,
+ file_handle_cache,
+ index_state,
+ index_module,
+ dir,
+ gc_pid,
+ file_handles_ets,
+ file_summary_ets,
+ cur_file_cache_ets,
+ flying_ets,
+ credit_disc_bound
+ }).
+
+-record(file_summary,
+ {file, valid_total_size, left, right, file_size, locked, readers}).
+
+-record(gc_state,
+ { dir,
+ index_module,
+ index_state,
+ file_summary_ets,
+ file_handles_ets,
+ msg_store
+ }).
+
+-record(dying_client,
+ { client_ref,
+ file,
+ offset
+ }).
+
+%%----------------------------------------------------------------------------
+
+-export_type([gc_state/0, file_num/0]).
+
+-type gc_state() :: #gc_state { dir :: file:filename(),
+ index_module :: atom(),
+ index_state :: any(),
+ file_summary_ets :: ets:tid(),
+ file_handles_ets :: ets:tid(),
+ msg_store :: server()
+ }.
+
+-type server() :: pid() | atom().
+-type client_ref() :: binary().
+-type file_num() :: non_neg_integer().
+-type client_msstate() :: #client_msstate {
+ server :: server(),
+ client_ref :: client_ref(),
+ file_handle_cache :: map(),
+ index_state :: any(),
+ index_module :: atom(),
+ dir :: file:filename(),
+ gc_pid :: pid(),
+ file_handles_ets :: ets:tid(),
+ file_summary_ets :: ets:tid(),
+ cur_file_cache_ets :: ets:tid(),
+ flying_ets :: ets:tid(),
+ credit_disc_bound :: {pos_integer(), pos_integer()}}.
+-type msg_ref_delta_gen(A) ::
+ fun ((A) -> 'finished' |
+ {rabbit_types:msg_id(), non_neg_integer(), A}).
+-type maybe_msg_id_fun() ::
+ 'undefined' | fun ((gb_sets:set(), 'written' | 'ignored') -> any()).
+-type maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok').
+-type deletion_thunk() :: fun (() -> boolean()).
+
+%%----------------------------------------------------------------------------
+
+%% We run GC whenever (garbage / sum_file_size) > ?GARBAGE_FRACTION
+%% It is not recommended to set this to < 0.5
+-define(GARBAGE_FRACTION, 0.5).
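+
+%% A rough sketch of the trigger condition (in terms of the
+%% sum_valid_data and sum_file_size fields of #msstate above):
+%%
+%%   Garbage = SumFileSize - SumValidData,
+%%   Garbage / SumFileSize > ?GARBAGE_FRACTION   %% => consider compacting
+%%
+%% e.g. 100 MiB of files holding only 45 MiB of valid data gives
+%% 55 / 100 = 0.55 > 0.5, so a suitable pair of neighbouring files may
+%% be compacted.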
+
+%% Message store is responsible for storing messages
+%% on disk and loading them back. The store handles both
+%% persistent messages and transient ones (when a node
+%% is under RAM pressure and needs to page messages out
+%% to disk). The store is responsible for locating messages
+%% on disk and maintaining an index.
+%%
+%% There are two message stores per node: one for transient
+%% and one for persistent messages.
+%%
+%% Queue processes interact with the stores via clients.
+%%
+%% The components:
+%%
+%% Index: this is a mapping from MsgId to #msg_location{}.
+%% By default, it's in ETS, but other implementations can
+%% be used.
+%% FileSummary: this maps File to #file_summary{} and is stored
+%% in ETS.
+%%
+%% The basic idea is that messages are appended to the current file up
+%% until that file becomes too big (> file_size_limit). At that point,
+%% the file is closed and a new file is created on the _right_ of the
+%% old file which is used for new messages. Files are named
+%% numerically ascending, thus the file with the lowest name is the
+%% eldest file.
+%%
+%% We need to keep track of which messages are in which files (this is
+%% the index); how much useful data is in each file and which files
+%% are on the left and right of each other. This is the purpose of the
+%% file summary ETS table.
+%%
+%% As messages are removed from files, holes appear in these
+%% files. The field ValidTotalSize contains the total amount of useful
+%% data left in the file. This is needed for garbage collection.
+%%
+%% When we discover that a file is now empty, we delete it. When we
+%% discover that it can be combined with the useful data in either its
+%% left or right neighbour, and overall, across all the files, we have
+%% ((the amount of garbage) / (the sum of all file sizes)) >
+%% ?GARBAGE_FRACTION, we start a garbage collection run concurrently,
+%% which will compact the two files together. This keeps disk
+%% utilisation high and aids performance. We deliberately do this
+%% lazily in order to prevent doing GC on files which are soon to be
+%% emptied (and hence deleted).
+%%
+%% Given the compaction between two files, the left file (i.e. elder
+%% file) is considered the ultimate destination for the good data in
+%% the right file. If necessary, the good data in the left file which
+%% is fragmented throughout the file is written out to a temporary
+%% file, then read back in to form a contiguous chunk of good data at
+%% the start of the left file. Thus the left file is garbage collected
+%% and compacted. Then the good data from the right file is copied
+%% onto the end of the left file. Index and file summary tables are
+%% updated.
+%%
+%% On non-clean startup, we scan the files we discover, dealing with
+%% the possibilities of a crash having occurred during a compaction
+%% (this consists of tidyup - the compaction is deliberately designed
+%% such that data is duplicated on disk rather than risking it being
+%% lost), and rebuild the file summary and index ETS table.
+%%
+%% So, with this design, messages move to the left. Eventually, they
+%% should end up in a contiguous block on the left and are then never
+%% rewritten. But this isn't quite the case. If in a file there is one
+%% message that is being ignored, for some reason, and messages in the
+%% file to the right and in the current block are being read all the
+%% time then it will repeatedly be the case that the good data from
+%% both files can be combined and will be written out to a new
+%% file. Whenever this happens, our shunned message will be rewritten.
+%%
+%% So, provided that we combine messages in the right order,
+%% (i.e. left file, bottom to top, right file, bottom to top),
+%% eventually our shunned message will end up at the bottom of the
+%% left file. The compaction/combining algorithm is smart enough to
+%% read in good data from the left file that is scattered throughout
+%% (i.e. C and D in the below diagram), then truncate the file to just
+%% above B (i.e. truncate to the limit of the good contiguous region
+%% at the start of the file), then write C and D on top and then write
+%% E, F and G from the right file on top. Thus contiguous blocks of
+%% good data at the bottom of files are not rewritten.
+%%
+%% +-------+ +-------+ +-------+
+%% | X | | G | | G |
+%% +-------+ +-------+ +-------+
+%% | D | | X | | F |
+%% +-------+ +-------+ +-------+
+%% | X | | X | | E |
+%% +-------+ +-------+ +-------+
+%% | C | | F | ===> | D |
+%% +-------+ +-------+ +-------+
+%% | X | | X | | C |
+%% +-------+ +-------+ +-------+
+%% | B | | X | | B |
+%% +-------+ +-------+ +-------+
+%% | A | | E | | A |
+%% +-------+ +-------+ +-------+
+%% left right left
+%%
+%% From this reasoning, we do have a bound on the number of times the
+%% message is rewritten. From when it is inserted, there can be no
+%% files inserted between it and the head of the queue, and the worst
+%% case is that every time it is rewritten, it moves one position lower
+%% in the file (for it to stay at the same position requires that
+%% there are no holes beneath it, which means truncate would be used
+%% and so it would not be rewritten at all). Thus this seems to
+%% suggest the limit is the number of messages ahead of it in the
+%% queue, though it's likely that that's pessimistic, given the
+%% requirements for compaction/combination of files.
+%%
+%% The other property that we have is the bound on the lowest
+%% utilisation, which should be 50% - worst case is that all files are
+%% fractionally over half full and can't be combined (equivalent is
+%% alternating full files and files with only one tiny message in
+%% them).
+%%
+%% Messages are reference-counted. When a message with the same msg id
+%% is written several times we only store it once, and only remove it
+%% from the store when it has been removed the same number of times.
+%%
+%% The reference counts do not persist. Therefore the initialisation
+%% function must be provided with a generator that produces ref count
+%% deltas for all recovered messages. This is only used on startup
+%% when the shutdown was non-clean.
+%%
+%% Read messages with a reference count greater than one are entered
+%% into a message cache. The purpose of the cache is not especially
+%% performance, though it can help there too, but prevention of memory
+%% explosion. It ensures that as messages with a high reference count
+%% are read from several processes they are read back as the same
+%% binary object rather than multiples of identical binary
+%% objects.
+%%
+%% Reads can be performed directly by clients without calling to the
+%% server. This is safe because multiple file handles can be used to
+%% read files. However, locking is used by the concurrent GC to make
+%% sure that reads are not attempted from files which are in the
+%% process of being garbage collected.
+%%
+%% When a message is removed, its reference count is decremented. Even
+%% if the reference count becomes 0, its entry is not removed. This is
+%% because in the event of the same message being sent to several
+%% different queues, there is the possibility of one queue writing and
+%% removing the message before other queues write it at all. Thus
+%% accommodating 0-reference counts allows us to avoid unnecessary
+%% writes here. Of course, there are complications: the file to which
+%% the message has already been written could be locked pending
+%% deletion or GC, which means we have to rewrite the message as the
+%% original copy will now be lost.
+%%
+%% The server automatically defers reads, removes and contains calls
+%% that refer to files which are currently being GC'd. Contains calls
+%% are only deferred in order to ensure they do not overtake removes.
+%%
+%% The current file to which messages are being written has a
+%% write-back cache. This is written to immediately by clients and can
+%% be read from by clients too. This means that there are only ever
+%% writes made to the current file, thus eliminating delays due to
+%% flushing write buffers in order to be able to safely read from the
+%% current file. The one exception to this is that on start up, the
+%% cache is not populated with msgs found in the current file, and
+%% thus in this case only, reads may have to come from the file
+%% itself. The effect of this is that even if the msg_store process is
+%% heavily overloaded, clients can still write and read messages with
+%% very low latency and not block at all.
+%%
+%% Clients of the msg_store are required to register before using the
+%% msg_store. This provides them with the necessary client-side state
+%% to allow them to directly access the various caches and files. When
+%% they terminate, they should deregister. They can do this by calling
+%% either client_terminate/1 or client_delete_and_terminate/1. The
+%% differences are: (a) client_terminate is synchronous. As a result,
+%% if the msg_store is badly overloaded and has lots of in-flight
+%% writes and removes to process, this will take some time to
+%% return. However, once it does return, you can be sure that all the
+%% actions you've issued to the msg_store have been processed. (b) Not
+%% only is client_delete_and_terminate/1 asynchronous, but it also
+%% permits writes and subsequent removes from the current
+%% (terminating) client which are still in flight to be safely
+%% ignored. Thus from the point of view of the msg_store itself, and
+%% all from the same client:
+%%
+%% (T) = termination; (WN) = write of msg N; (RN) = remove of msg N
+%% --> W1, W2, W1, R1, T, W3, R2, W2, R1, R2, R3, W4 -->
+%%
+%% The client obviously sent T after all the other messages (up to
+%% W4), but because the msg_store prioritises messages, the T can be
+%% promoted and thus received early.
+%%
+%% Thus at the point of the msg_store receiving T, we have messages 1
+%% and 2 with a refcount of 1. After T, W3 will be ignored because
+%% it's an unknown message, as will R3, and W4. W2, R1 and R2 won't be
+%% ignored because the messages that they refer to were already known
+%% to the msg_store prior to T. However, it can be a little more
+%% complex: after the first R2, the refcount of msg 2 is 0. At that
+%% point, if a GC occurs or file deletion, msg 2 could vanish, which
+%% would then mean that the subsequent W2 and R2 are then ignored.
+%%
+%% The use case then for client_delete_and_terminate/1 is if the
+%% client wishes to remove everything it's written to the msg_store:
+%% it issues removes for all messages it's written and not removed,
+%% and then calls client_delete_and_terminate/1. At that point, any
+%% in-flight writes (and subsequent removes) can be ignored, but
+%% removes and writes for messages the msg_store already knows about
+%% will continue to be processed normally (which will normally just
+%% involve modifying the reference count, which is fast). Thus we save
+%% disk bandwidth for writes which are going to be immediately removed
+%% again by the terminating client.
+%%
+%% We use a separate set to keep track of the dying clients in order
+%% to keep that set, which is inspected on every write and remove, as
+%% small as possible. Inspecting the set of all clients would degrade
+%% performance with many healthy clients and few, if any, dying
+%% clients, which is the typical case.
+%%
+%% Client termination messages are stored in a separate ets index to
+%% avoid filling primary message store index and message files with
+%% client termination messages.
+%%
+%% When the msg_store has a backlog (i.e. it has unprocessed messages
+%% in its mailbox / gen_server priority queue), a further optimisation
+%% opportunity arises: we can eliminate pairs of 'write' and 'remove'
+%% from the same client for the same message. A typical occurrence of
+%% these is when an empty durable queue delivers persistent messages
+%% to ack'ing consumers. The queue will asynchronously ask the
+%% msg_store to 'write' such messages, and when they are acknowledged
+%% it will issue a 'remove'. That 'remove' may be issued before the
+%% msg_store has processed the 'write'. There is then no point going
+%% ahead with the processing of that 'write'.
+%%
+%% To detect this situation a 'flying_ets' table is shared between the
+%% clients and the server. The table is keyed on the combination of
+%% client (reference) and msg id, and the value represents an
+%% integration of all the writes and removes currently "in flight" for
+%% that message between the client and server - '+1' means all the
+%% writes/removes add up to a single 'write', '-1' to a 'remove', and
+%% '0' to nothing. (NB: the integration can never add up to more than
+%% one 'write' or 'remove' since clients must not write/remove a message
+%% more than once without first removing/writing it).
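+%%
+%% As a sketch of the bookkeeping: a client calling write(M) moves the
+%% {MsgId, CRef} entry to +1; an immediate remove(M) brings it back to
+%% 0; when the server later dequeues the 'write' it finds 0, deletes
+%% the entry and never writes M to disk.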
+%%
+%% Maintaining this table poses two challenges: 1) both the clients
+%% and the server access and update the table, which causes
+%% concurrency issues, 2) we must ensure that entries do not stay in
+%% the table forever, since that would constitute a memory leak. We
+%% address the former by carefully modelling all operations as
+%% sequences of atomic actions that produce valid results in all
+%% possible interleavings. We address the latter by deleting table
+%% entries whenever the server finds a 0-valued entry during the
+%% processing of a write/remove. 0 is essentially equivalent to "no
+%% entry". If, OTOH, the value is non-zero we know there is at least
+%% one other 'write' or 'remove' in flight, so we get an opportunity
+%% later to delete the table entry when processing these.
+%%
+%% There are two further complications. We need to ensure that 1)
+%% eliminated writes still get confirmed, and 2) the write-back cache
+%% doesn't grow unbounded. These are quite straightforward to
+%% address. See the comments in the code.
+%%
+%% For notes on Clean Shutdown and startup, see documentation in
+%% rabbit_variable_queue.
+
+%%----------------------------------------------------------------------------
+%% public API
+%%----------------------------------------------------------------------------
+
+-spec start_link
+ (atom(), file:filename(), [binary()] | 'undefined',
+ {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error().
+
+start_link(Type, Dir, ClientRefs, StartupFunState) when is_atom(Type) ->
+ gen_server2:start_link(?MODULE,
+ [Type, Dir, ClientRefs, StartupFunState],
+ [{timeout, infinity}]).
+
+start_global_store_link(Type, Dir, ClientRefs, StartupFunState) when is_atom(Type) ->
+ gen_server2:start_link({local, Type}, ?MODULE,
+ [Type, Dir, ClientRefs, StartupFunState],
+ [{timeout, infinity}]).
+
+-spec successfully_recovered_state(server()) -> boolean().
+
+successfully_recovered_state(Server) ->
+ gen_server2:call(Server, successfully_recovered_state, infinity).
+
+-spec client_init(server(), client_ref(), maybe_msg_id_fun(),
+ maybe_close_fds_fun()) -> client_msstate().
+
+client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) when is_pid(Server); is_atom(Server) ->
+ {IState, IModule, Dir, GCPid,
+ FileHandlesEts, FileSummaryEts, CurFileCacheEts, FlyingEts} =
+ gen_server2:call(
+ Server, {new_client_state, Ref, self(), MsgOnDiskFun, CloseFDsFun},
+ infinity),
+ CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound,
+ ?CREDIT_DISC_BOUND),
+ #client_msstate { server = Server,
+ client_ref = Ref,
+ file_handle_cache = #{},
+ index_state = IState,
+ index_module = IModule,
+ dir = Dir,
+ gc_pid = GCPid,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ credit_disc_bound = CreditDiscBound }.
+
+-spec client_terminate(client_msstate()) -> 'ok'.
+
+client_terminate(CState = #client_msstate { client_ref = Ref }) ->
+ close_all_handles(CState),
+ ok = server_call(CState, {client_terminate, Ref}).
+
+-spec client_delete_and_terminate(client_msstate()) -> 'ok'.
+
+client_delete_and_terminate(CState = #client_msstate { client_ref = Ref }) ->
+ close_all_handles(CState),
+ ok = server_cast(CState, {client_dying, Ref}),
+ ok = server_cast(CState, {client_delete, Ref}).
+
+-spec client_ref(client_msstate()) -> client_ref().
+
+client_ref(#client_msstate { client_ref = Ref }) -> Ref.
+
+-spec write_flow(rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'.
+
+write_flow(MsgId, Msg,
+ CState = #client_msstate {
+ server = Server,
+ credit_disc_bound = CreditDiscBound }) ->
+ %% Here we are tracking messages sent by the
+ %% rabbit_amqqueue_process process via the
+ %% rabbit_variable_queue. We are accessing the
+ %% rabbit_amqqueue_process process dictionary.
+ credit_flow:send(Server, CreditDiscBound),
+ client_write(MsgId, Msg, flow, CState).
+
+-spec write(rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'.
+
+write(MsgId, Msg, CState) -> client_write(MsgId, Msg, noflow, CState).
+
+-spec read(rabbit_types:msg_id(), client_msstate()) ->
+ {rabbit_types:ok(msg()) | 'not_found', client_msstate()}.
+
+read(MsgId,
+ CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts }) ->
+ file_handle_cache_stats:update(msg_store_read),
+ %% Check the cur file cache
+ case ets:lookup(CurFileCacheEts, MsgId) of
+ [] ->
+ Defer = fun() -> {server_call(CState, {read, MsgId}), CState} end,
+ case index_lookup_positive_ref_count(MsgId, CState) of
+ not_found -> Defer();
+ MsgLocation -> client_read1(MsgLocation, Defer, CState)
+ end;
+ [{MsgId, Msg, _CacheRefCount}] ->
+ {{ok, Msg}, CState}
+ end.
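+
+%% A rough usage sketch from a queue process's point of view (Server,
+%% Ref, MsgId and Msg are placeholders; the MsgOnDiskFun and close-FDs
+%% callbacks may both be 'undefined'):
+%%
+%%   CState0 = rabbit_msg_store:client_init(Server, Ref, undefined, undefined),
+%%   ok = rabbit_msg_store:write(MsgId, Msg, CState0),
+%%   {{ok, Msg}, CState1} = rabbit_msg_store:read(MsgId, CState0),
+%%   ok = rabbit_msg_store:remove([MsgId], CState1),
+%%   ok = rabbit_msg_store:client_terminate(CState1).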
+
+-spec contains(rabbit_types:msg_id(), client_msstate()) -> boolean().
+
+contains(MsgId, CState) -> server_call(CState, {contains, MsgId}).
+
+-spec remove([rabbit_types:msg_id()], client_msstate()) -> 'ok'.
+
+remove([], _CState) -> ok;
+remove(MsgIds, CState = #client_msstate { client_ref = CRef }) ->
+ [client_update_flying(-1, MsgId, CState) || MsgId <- MsgIds],
+ server_cast(CState, {remove, CRef, MsgIds}).
+
+-spec set_maximum_since_use(server(), non_neg_integer()) -> 'ok'.
+
+set_maximum_since_use(Server, Age) when is_pid(Server); is_atom(Server) ->
+ gen_server2:cast(Server, {set_maximum_since_use, Age}).
+
+%%----------------------------------------------------------------------------
+%% Client-side-only helpers
+%%----------------------------------------------------------------------------
+
+server_call(#client_msstate { server = Server }, Msg) ->
+ gen_server2:call(Server, Msg, infinity).
+
+server_cast(#client_msstate { server = Server }, Msg) ->
+ gen_server2:cast(Server, Msg).
+
+client_write(MsgId, Msg, Flow,
+ CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts,
+ client_ref = CRef }) ->
+ file_handle_cache_stats:update(msg_store_write),
+ ok = client_update_flying(+1, MsgId, CState),
+ ok = update_msg_cache(CurFileCacheEts, MsgId, Msg),
+ ok = server_cast(CState, {write, CRef, MsgId, Flow}).
+
+client_read1(#msg_location { msg_id = MsgId, file = File } = MsgLocation, Defer,
+ CState = #client_msstate { file_summary_ets = FileSummaryEts }) ->
+ case ets:lookup(FileSummaryEts, File) of
+ [] -> %% File has been GC'd and no longer exists. Go around again.
+ read(MsgId, CState);
+ [#file_summary { locked = Locked, right = Right }] ->
+ client_read2(Locked, Right, MsgLocation, Defer, CState)
+ end.
+
+client_read2(false, undefined, _MsgLocation, Defer, _CState) ->
+ %% Although we've already checked both caches and not found the
+ %% message there, the message is apparently in the
+ %% current_file. We can only arrive here if we are trying to read
+ %% a message which we have not written, which is very odd, so just
+ %% defer.
+ %%
+ %% OR, on startup, the cur_file_cache is not populated with the
+ %% contents of the current file, thus reads from the current file
+ %% will end up here and will need to be deferred.
+ Defer();
+client_read2(true, _Right, _MsgLocation, Defer, _CState) ->
+ %% Of course, in the mean time, the GC could have run and our msg
+ %% is actually in a different file, unlocked. However, deferring is
+ %% the safest and simplest thing to do.
+ Defer();
+client_read2(false, _Right,
+ MsgLocation = #msg_location { msg_id = MsgId, file = File },
+ Defer,
+ CState = #client_msstate { file_summary_ets = FileSummaryEts }) ->
+ %% It's entirely possible that everything we're doing from here on
+ %% is for the wrong file, or a non-existent file, as a GC may have
+ %% finished.
+ safe_ets_update_counter(
+ FileSummaryEts, File, {#file_summary.readers, +1},
+ fun (_) -> client_read3(MsgLocation, Defer, CState) end,
+ fun () -> read(MsgId, CState) end).
+
+client_read3(#msg_location { msg_id = MsgId, file = File }, Defer,
+ CState = #client_msstate { file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ gc_pid = GCPid,
+ client_ref = Ref }) ->
+ Release =
+ fun() -> ok = case ets:update_counter(FileSummaryEts, File,
+ {#file_summary.readers, -1}) of
+ 0 -> case ets:lookup(FileSummaryEts, File) of
+ [#file_summary { locked = true }] ->
+ rabbit_msg_store_gc:no_readers(
+ GCPid, File);
+ _ -> ok
+ end;
+ _ -> ok
+ end
+ end,
+ %% If a GC involving the file hasn't already started, it won't
+ %% start now. Need to check again to see if we've been locked in
+ %% the meantime, between lookup and update_counter (thus GC
+ %% started before our +1. In fact, it could have finished by now
+ %% too).
+ case ets:lookup(FileSummaryEts, File) of
+ [] -> %% GC has deleted our file, just go round again.
+ read(MsgId, CState);
+ [#file_summary { locked = true }] ->
+ %% If we get a badarg here, then the GC has finished and
+ %% deleted our file. Try going around again. Otherwise,
+ %% just defer.
+ %%
+ %% badarg scenario: we lookup, msg_store locks, GC starts,
+ %% GC ends, we +1 readers, msg_store ets:deletes (and
+ %% unlocks the dest)
+ try Release(),
+ Defer()
+ catch error:badarg -> read(MsgId, CState)
+ end;
+ [#file_summary { locked = false }] ->
+ %% Ok, we're definitely safe to continue - a GC involving
+ %% the file cannot start up now, and isn't running, so
+ %% nothing will tell us from now on to close the handle if
+ %% it's already open.
+ %%
+ %% Finally, we need to recheck that the msg is still at
+ %% the same place - it's possible an entire GC ran between
+ %% us doing the lookup and the +1 on the readers. (Same as
+ %% badarg scenario above, but we don't have a missing file
+ %% - we just have the /wrong/ file).
+ case index_lookup(MsgId, CState) of
+ #msg_location { file = File } = MsgLocation ->
+ %% Still the same file.
+ {ok, CState1} = close_all_indicated(CState),
+ %% We are now guaranteed that the mark_handle_open
+ %% call will either insert_new correctly, or will
+ %% fail, but find the value is open, not close.
+ mark_handle_open(FileHandlesEts, File, Ref),
+ %% Could the msg_store now mark the file to be
+ %% closed? No: marks for closing are issued only
+ %% when the msg_store has locked the file.
+ %% This will never be the current file
+ {Msg, CState2} = read_from_disk(MsgLocation, CState1),
+ Release(), %% this MUST NOT fail with badarg
+ {{ok, Msg}, CState2};
+ #msg_location {} = MsgLocation -> %% different file!
+ Release(), %% this MUST NOT fail with badarg
+ client_read1(MsgLocation, Defer, CState);
+ not_found -> %% it seems not to exist. Defer, just to be sure.
+ try Release() %% this can badarg, same as locked case, above
+ catch error:badarg -> ok
+ end,
+ Defer()
+ end
+ end.
+
+client_update_flying(Diff, MsgId, #client_msstate { flying_ets = FlyingEts,
+ client_ref = CRef }) ->
+ Key = {MsgId, CRef},
+ case ets:insert_new(FlyingEts, {Key, Diff}) of
+ true -> ok;
+ false -> try ets:update_counter(FlyingEts, Key, {2, Diff}) of
+ 0 -> ok;
+ Diff -> ok;
+ Err -> throw({bad_flying_ets_update, Diff, Err, Key})
+ catch error:badarg ->
+ %% this is guaranteed to succeed since the
+ %% server only removes and updates flying_ets
+ %% entries; it never inserts them
+ true = ets:insert_new(FlyingEts, {Key, Diff})
+ end,
+ ok
+ end.
+
+clear_client(CRef, State = #msstate { cref_to_msg_ids = CTM,
+ dying_clients = DyingClients }) ->
+ State #msstate { cref_to_msg_ids = maps:remove(CRef, CTM),
+ dying_clients = maps:remove(CRef, DyingClients) }.
+
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+
+init([Type, BaseDir, ClientRefs, StartupFunState]) ->
+ process_flag(trap_exit, true),
+
+ ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
+ [self()]),
+
+ Dir = filename:join(BaseDir, atom_to_list(Type)),
+ Name = filename:join(filename:basename(BaseDir), atom_to_list(Type)),
+
+ {ok, IndexModule} = application:get_env(rabbit, msg_store_index_module),
+ rabbit_log:info("Message store ~tp: using ~p to provide index~n", [Name, IndexModule]),
+
+ AttemptFileSummaryRecovery =
+ case ClientRefs of
+ undefined -> ok = rabbit_file:recursive_delete([Dir]),
+ ok = filelib:ensure_dir(filename:join(Dir, "nothing")),
+ false;
+ _ -> ok = filelib:ensure_dir(filename:join(Dir, "nothing")),
+ recover_crashed_compactions(Dir)
+ end,
+ %% if we found crashed compactions we trust neither the
+ %% file_summary nor the location index. Note the file_summary is
+ %% left empty here if it can't be recovered.
+ {FileSummaryRecovered, FileSummaryEts} =
+ recover_file_summary(AttemptFileSummaryRecovery, Dir),
+ {CleanShutdown, IndexState, ClientRefs1} =
+ recover_index_and_client_refs(IndexModule, FileSummaryRecovered,
+ ClientRefs, Dir, Name),
+ Clients = maps:from_list(
+ [{CRef, {undefined, undefined, undefined}} ||
+ CRef <- ClientRefs1]),
+ %% CleanShutdown => msg location index and file_summary both
+ %% recovered correctly.
+ true = case {FileSummaryRecovered, CleanShutdown} of
+ {true, false} -> ets:delete_all_objects(FileSummaryEts);
+ _ -> true
+ end,
+ %% CleanShutdown <=> msg location index and file_summary both
+ %% recovered correctly.
+
+ FileHandlesEts = ets:new(rabbit_msg_store_shared_file_handles,
+ [ordered_set, public]),
+ CurFileCacheEts = ets:new(rabbit_msg_store_cur_file, [set, public]),
+ FlyingEts = ets:new(rabbit_msg_store_flying, [set, public]),
+
+ {ok, FileSizeLimit} = application:get_env(rabbit, msg_store_file_size_limit),
+
+ {ok, GCPid} = rabbit_msg_store_gc:start_link(
+ #gc_state { dir = Dir,
+ index_module = IndexModule,
+ index_state = IndexState,
+ file_summary_ets = FileSummaryEts,
+ file_handles_ets = FileHandlesEts,
+ msg_store = self()
+ }),
+
+ CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound,
+ ?CREDIT_DISC_BOUND),
+
+ State = #msstate { dir = Dir,
+ index_module = IndexModule,
+ index_state = IndexState,
+ current_file = 0,
+ current_file_handle = undefined,
+ file_handle_cache = #{},
+ sync_timer_ref = undefined,
+ sum_valid_data = 0,
+ sum_file_size = 0,
+ pending_gc_completion = maps:new(),
+ gc_pid = GCPid,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ dying_clients = #{},
+ clients = Clients,
+ successfully_recovered = CleanShutdown,
+ file_size_limit = FileSizeLimit,
+ cref_to_msg_ids = #{},
+ credit_disc_bound = CreditDiscBound
+ },
+ %% If we didn't recover the msg location index then we need to
+ %% rebuild it now.
+ Cleanliness = case CleanShutdown of
+ true -> "clean";
+ false -> "unclean"
+ end,
+ rabbit_log:debug("Rebuilding message location index after ~s shutdown...~n",
+ [Cleanliness]),
+ {Offset, State1 = #msstate { current_file = CurFile }} =
+ build_index(CleanShutdown, StartupFunState, State),
+ rabbit_log:debug("Finished rebuilding index~n", []),
+ %% read is only needed so that we can seek
+ {ok, CurHdl} = open_file(Dir, filenum_to_name(CurFile),
+ [read | ?WRITE_MODE]),
+ {ok, Offset} = file_handle_cache:position(CurHdl, Offset),
+ ok = file_handle_cache:truncate(CurHdl),
+
+ {ok, maybe_compact(State1 #msstate { current_file_handle = CurHdl }),
+ hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+prioritise_call(Msg, _From, _Len, _State) ->
+ case Msg of
+ successfully_recovered_state -> 7;
+ {new_client_state, _Ref, _Pid, _MODC, _CloseFDsFun} -> 7;
+ {read, _MsgId} -> 2;
+ _ -> 0
+ end.
+
+prioritise_cast(Msg, _Len, _State) ->
+ case Msg of
+ {combine_files, _Source, _Destination, _Reclaimed} -> 8;
+ {delete_file, _File, _Reclaimed} -> 8;
+ {set_maximum_since_use, _Age} -> 8;
+ {client_dying, _Pid} -> 7;
+ _ -> 0
+ end.
+
+prioritise_info(Msg, _Len, _State) ->
+ case Msg of
+ sync -> 8;
+ _ -> 0
+ end.
+
+handle_call(successfully_recovered_state, _From, State) ->
+ reply(State #msstate.successfully_recovered, State);
+
+handle_call({new_client_state, CRef, CPid, MsgOnDiskFun, CloseFDsFun}, _From,
+ State = #msstate { dir = Dir,
+ index_state = IndexState,
+ index_module = IndexModule,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ clients = Clients,
+ gc_pid = GCPid }) ->
+ Clients1 = maps:put(CRef, {CPid, MsgOnDiskFun, CloseFDsFun}, Clients),
+ erlang:monitor(process, CPid),
+ reply({IndexState, IndexModule, Dir, GCPid, FileHandlesEts, FileSummaryEts,
+ CurFileCacheEts, FlyingEts},
+ State #msstate { clients = Clients1 });
+
+handle_call({client_terminate, CRef}, _From, State) ->
+ reply(ok, clear_client(CRef, State));
+
+handle_call({read, MsgId}, From, State) ->
+ State1 = read_message(MsgId, From, State),
+ noreply(State1);
+
+handle_call({contains, MsgId}, From, State) ->
+ State1 = contains_message(MsgId, From, State),
+ noreply(State1).
+
+handle_cast({client_dying, CRef},
+ State = #msstate { dying_clients = DyingClients,
+ current_file_handle = CurHdl,
+ current_file = CurFile }) ->
+ {ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl),
+ DyingClients1 = maps:put(CRef,
+ #dying_client{client_ref = CRef,
+ file = CurFile,
+ offset = CurOffset},
+ DyingClients),
+ noreply(State #msstate { dying_clients = DyingClients1 });
+
+handle_cast({client_delete, CRef},
+ State = #msstate { clients = Clients }) ->
+ State1 = State #msstate { clients = maps:remove(CRef, Clients) },
+ noreply(clear_client(CRef, State1));
+
+handle_cast({write, CRef, MsgId, Flow},
+ State = #msstate { cur_file_cache_ets = CurFileCacheEts,
+ clients = Clients,
+ credit_disc_bound = CreditDiscBound }) ->
+ case Flow of
+ flow -> {CPid, _, _} = maps:get(CRef, Clients),
+ %% We are going to process a message sent by the
+ %% rabbit_amqqueue_process. Now we are accessing the
+ %% msg_store process dictionary.
+ credit_flow:ack(CPid, CreditDiscBound);
+ noflow -> ok
+ end,
+ true = 0 =< ets:update_counter(CurFileCacheEts, MsgId, {3, -1}),
+ case update_flying(-1, MsgId, CRef, State) of
+ process ->
+ [{MsgId, Msg, _PWC}] = ets:lookup(CurFileCacheEts, MsgId),
+ noreply(write_message(MsgId, Msg, CRef, State));
+ ignore ->
+ %% A 'remove' has already been issued and eliminated the
+ %% 'write'.
+ State1 = blind_confirm(CRef, gb_sets:singleton(MsgId),
+ ignored, State),
+ %% If all writes get eliminated, cur_file_cache_ets could
+ %% grow unbounded. To prevent that we delete the cache
+ %% entry here, but only if the message isn't in the
+ %% current file. That way reads of the message can
+ %% continue to be done client side, from either the cache
+ %% or the non-current files. If the message *is* in the
+ %% current file then the cache entry will be removed by
+ %% the normal logic for that in write_message/4 and
+ %% maybe_roll_to_new_file/2.
+ case index_lookup(MsgId, State1) of
+ [#msg_location { file = File }]
+ when File == State1 #msstate.current_file ->
+ ok;
+ _ ->
+ true = ets:match_delete(CurFileCacheEts, {MsgId, '_', 0})
+ end,
+ noreply(State1)
+ end;
+
+handle_cast({remove, CRef, MsgIds}, State) ->
+ {RemovedMsgIds, State1} =
+ lists:foldl(
+ fun (MsgId, {Removed, State2}) ->
+ case update_flying(+1, MsgId, CRef, State2) of
+ process -> {[MsgId | Removed],
+ remove_message(MsgId, CRef, State2)};
+ ignore -> {Removed, State2}
+ end
+ end, {[], State}, MsgIds),
+ noreply(maybe_compact(client_confirm(CRef, gb_sets:from_list(RemovedMsgIds),
+ ignored, State1)));
+
+handle_cast({combine_files, Source, Destination, Reclaimed},
+ State = #msstate { sum_file_size = SumFileSize,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ clients = Clients }) ->
+ ok = cleanup_after_file_deletion(Source, State),
+ %% see comment in cleanup_after_file_deletion, and client_read3
+ true = mark_handle_to_close(Clients, FileHandlesEts, Destination, false),
+ true = ets:update_element(FileSummaryEts, Destination,
+ {#file_summary.locked, false}),
+ State1 = State #msstate { sum_file_size = SumFileSize - Reclaimed },
+ noreply(maybe_compact(run_pending([Source, Destination], State1)));
+
+handle_cast({delete_file, File, Reclaimed},
+ State = #msstate { sum_file_size = SumFileSize }) ->
+ ok = cleanup_after_file_deletion(File, State),
+ State1 = State #msstate { sum_file_size = SumFileSize - Reclaimed },
+ noreply(maybe_compact(run_pending([File], State1)));
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ noreply(State).
+
+handle_info(sync, State) ->
+ noreply(internal_sync(State));
+
+handle_info(timeout, State) ->
+ noreply(internal_sync(State));
+
+handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) ->
+ %% similar to what happens in
+ %% rabbit_amqqueue_process:handle_ch_down but with a relation of
+ %% msg_store -> rabbit_amqqueue_process instead of
+ %% rabbit_amqqueue_process -> rabbit_channel.
+ credit_flow:peer_down(Pid),
+ noreply(State);
+
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State}.
+
+terminate(_Reason, State = #msstate { index_state = IndexState,
+ index_module = IndexModule,
+ current_file_handle = CurHdl,
+ gc_pid = GCPid,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ clients = Clients,
+ dir = Dir }) ->
+ rabbit_log:info("Stopping message store for directory '~s'", [Dir]),
+ %% stop the gc first, otherwise it could be working and we pull
+ %% out the ets tables from under it.
+ ok = rabbit_msg_store_gc:stop(GCPid),
+ State1 = case CurHdl of
+ undefined -> State;
+ _ -> State2 = internal_sync(State),
+ ok = file_handle_cache:close(CurHdl),
+ State2
+ end,
+ State3 = close_all_handles(State1),
+ case store_file_summary(FileSummaryEts, Dir) of
+ ok -> ok;
+ {error, FSErr} ->
+ rabbit_log:error("Unable to store file summary"
+ " for vhost message store for directory ~p~n"
+ "Error: ~p~n",
+ [Dir, FSErr])
+ end,
+ [true = ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts,
+ CurFileCacheEts, FlyingEts]],
+ IndexModule:terminate(IndexState),
+ case store_recovery_terms([{client_refs, maps:keys(Clients)},
+ {index_module, IndexModule}], Dir) of
+ ok ->
+ rabbit_log:info("Message store for directory '~s' is stopped", [Dir]),
+ ok;
+ {error, RTErr} ->
+ rabbit_log:error("Unable to save message store recovery terms"
+ " for directory ~p~nError: ~p~n",
+ [Dir, RTErr])
+ end,
+ State3 #msstate { index_state = undefined,
+ current_file_handle = undefined }.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+%%----------------------------------------------------------------------------
+%% general helper functions
+%%----------------------------------------------------------------------------
+
+noreply(State) ->
+ {State1, Timeout} = next_state(State),
+ {noreply, State1, Timeout}.
+
+reply(Reply, State) ->
+ {State1, Timeout} = next_state(State),
+ {reply, Reply, State1, Timeout}.
+
+next_state(State = #msstate { sync_timer_ref = undefined,
+ cref_to_msg_ids = CTM }) ->
+ case maps:size(CTM) of
+ 0 -> {State, hibernate};
+ _ -> {start_sync_timer(State), 0}
+ end;
+next_state(State = #msstate { cref_to_msg_ids = CTM }) ->
+ case maps:size(CTM) of
+ 0 -> {stop_sync_timer(State), hibernate};
+ _ -> {State, 0}
+ end.
+
+start_sync_timer(State) ->
+ rabbit_misc:ensure_timer(State, #msstate.sync_timer_ref,
+ ?SYNC_INTERVAL, sync).
+
+stop_sync_timer(State) ->
+ rabbit_misc:stop_timer(State, #msstate.sync_timer_ref).
+
+internal_sync(State = #msstate { current_file_handle = CurHdl,
+ cref_to_msg_ids = CTM }) ->
+ State1 = stop_sync_timer(State),
+ CGs = maps:fold(fun (CRef, MsgIds, NS) ->
+ case gb_sets:is_empty(MsgIds) of
+ true -> NS;
+ false -> [{CRef, MsgIds} | NS]
+ end
+ end, [], CTM),
+ ok = case CGs of
+ [] -> ok;
+ _ -> file_handle_cache:sync(CurHdl)
+ end,
+ lists:foldl(fun ({CRef, MsgIds}, StateN) ->
+ client_confirm(CRef, MsgIds, written, StateN)
+ end, State1, CGs).
+
+update_flying(Diff, MsgId, CRef, #msstate { flying_ets = FlyingEts }) ->
+ Key = {MsgId, CRef},
+ NDiff = -Diff,
+ case ets:lookup(FlyingEts, Key) of
+ [] -> ignore;
+ [{_, Diff}] -> ignore; %% [1]
+ [{_, NDiff}] -> ets:update_counter(FlyingEts, Key, {2, Diff}),
+ true = ets:delete_object(FlyingEts, {Key, 0}),
+ process;
+ [{_, 0}] -> true = ets:delete_object(FlyingEts, {Key, 0}),
+ ignore;
+ [{_, Err}] -> throw({bad_flying_ets_record, Diff, Err, Key})
+ end.
+%% [1] We can get here, for example, in the following scenario: There
+%% is a write followed by a remove in flight. The counter will be 0,
+%% so on processing the write the server attempts to delete the
+%% entry. If at that point the client injects another write it will
+%% either insert a new entry, containing +1, or increment the existing
+%% entry to +1, thus preventing its removal. Either way therefore when
+%% the server processes the remove, the counter will be +1.
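+%%
+%% An illustrative trace of [1] (the value of the {MsgId, CRef} entry
+%% after each step; a sketch, not programme output):
+%%
+%%   client write  W1       -> entry = +1
+%%   client remove R1       -> entry =  0
+%%   server processes W1    -> sees 0, deletes the entry, ignores the write
+%%   client write  W2       -> entry = +1 again
+%%   server processes R1    -> sees +1 (equal to its Diff): case [1], ignore
+%%   server processes W2    -> counts the entry down to 0 and does the write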
+
+write_action({true, not_found}, _MsgId, State) ->
+ {ignore, undefined, State};
+write_action({true, #msg_location { file = File }}, _MsgId, State) ->
+ {ignore, File, State};
+write_action({false, not_found}, _MsgId, State) ->
+ {write, State};
+write_action({Mask, #msg_location { ref_count = 0, file = File,
+ total_size = TotalSize }},
+ MsgId, State = #msstate { file_summary_ets = FileSummaryEts }) ->
+ case {Mask, ets:lookup(FileSummaryEts, File)} of
+ {false, [#file_summary { locked = true }]} ->
+ ok = index_delete(MsgId, State),
+ {write, State};
+ {false_if_increment, [#file_summary { locked = true }]} ->
+ %% The msg for MsgId is older than the client death
+ %% message, but as it is being GC'd currently we'll have
+ %% to write a new copy, which will then be younger, so
+ %% ignore this write.
+ {ignore, File, State};
+ {_Mask, [#file_summary {}]} ->
+ ok = index_update_ref_count(MsgId, 1, State),
+ State1 = adjust_valid_total_size(File, TotalSize, State),
+ {confirm, File, State1}
+ end;
+write_action({_Mask, #msg_location { ref_count = RefCount, file = File }},
+ MsgId, State) ->
+ ok = index_update_ref_count(MsgId, RefCount + 1, State),
+ %% We already know about it, just update counter. Only update
+ %% field otherwise bad interaction with concurrent GC
+ {confirm, File, State}.
+
+write_message(MsgId, Msg, CRef,
+ State = #msstate { cur_file_cache_ets = CurFileCacheEts }) ->
+ case write_action(should_mask_action(CRef, MsgId, State), MsgId, State) of
+ {write, State1} ->
+ write_message(MsgId, Msg,
+ record_pending_confirm(CRef, MsgId, State1));
+ {ignore, CurFile, State1 = #msstate { current_file = CurFile }} ->
+ State1;
+ {ignore, _File, State1} ->
+ true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}),
+ State1;
+ {confirm, CurFile, State1 = #msstate { current_file = CurFile }}->
+ record_pending_confirm(CRef, MsgId, State1);
+ {confirm, _File, State1} ->
+ true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}),
+ update_pending_confirms(
+ fun (MsgOnDiskFun, CTM) ->
+ MsgOnDiskFun(gb_sets:singleton(MsgId), written),
+ CTM
+ end, CRef, State1)
+ end.
+
+remove_message(MsgId, CRef,
+ State = #msstate { file_summary_ets = FileSummaryEts }) ->
+ case should_mask_action(CRef, MsgId, State) of
+ {true, _Location} ->
+ State;
+ {false_if_increment, #msg_location { ref_count = 0 }} ->
+ %% CRef has tried to both write and remove this msg whilst
+ %% it's being GC'd.
+ %%
+ %% ASSERTION: [#file_summary { locked = true }] =
+ %% ets:lookup(FileSummaryEts, File),
+ State;
+ {_Mask, #msg_location { ref_count = RefCount, file = File,
+ total_size = TotalSize }}
+ when RefCount > 0 ->
+ %% only update field, otherwise bad interaction with
+ %% concurrent GC
+ Dec = fun () -> index_update_ref_count(
+ MsgId, RefCount - 1, State) end,
+ case RefCount of
+ %% don't remove from cur_file_cache_ets here because
+ %% there may be further writes in the mailbox for the
+ %% same msg.
+ 1 -> case ets:lookup(FileSummaryEts, File) of
+ [#file_summary { locked = true }] ->
+ add_to_pending_gc_completion(
+ {remove, MsgId, CRef}, File, State);
+ [#file_summary {}] ->
+ ok = Dec(),
+ delete_file_if_empty(
+ File, adjust_valid_total_size(
+ File, -TotalSize, State))
+ end;
+ _ -> ok = Dec(),
+ State
+ end
+ end.
+
+write_message(MsgId, Msg,
+ State = #msstate { current_file_handle = CurHdl,
+ current_file = CurFile,
+ sum_valid_data = SumValid,
+ sum_file_size = SumFileSize,
+ file_summary_ets = FileSummaryEts }) ->
+ {ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl),
+ {ok, TotalSize} = rabbit_msg_file:append(CurHdl, MsgId, Msg),
+ ok = index_insert(
+ #msg_location { msg_id = MsgId, ref_count = 1, file = CurFile,
+ offset = CurOffset, total_size = TotalSize }, State),
+ [#file_summary { right = undefined, locked = false }] =
+ ets:lookup(FileSummaryEts, CurFile),
+ [_,_] = ets:update_counter(FileSummaryEts, CurFile,
+ [{#file_summary.valid_total_size, TotalSize},
+ {#file_summary.file_size, TotalSize}]),
+ maybe_roll_to_new_file(CurOffset + TotalSize,
+ State #msstate {
+ sum_valid_data = SumValid + TotalSize,
+ sum_file_size = SumFileSize + TotalSize }).
+
+read_message(MsgId, From, State) ->
+ case index_lookup_positive_ref_count(MsgId, State) of
+ not_found -> gen_server2:reply(From, not_found),
+ State;
+ MsgLocation -> read_message1(From, MsgLocation, State)
+ end.
+
+read_message1(From, #msg_location { msg_id = MsgId, file = File,
+ offset = Offset } = MsgLoc,
+ State = #msstate { current_file = CurFile,
+ current_file_handle = CurHdl,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts }) ->
+ case File =:= CurFile of
+ true -> {Msg, State1} =
+ %% can return [] if msg in file existed on startup
+ case ets:lookup(CurFileCacheEts, MsgId) of
+ [] ->
+ {ok, RawOffSet} =
+ file_handle_cache:current_raw_offset(CurHdl),
+ ok = case Offset >= RawOffSet of
+ true -> file_handle_cache:flush(CurHdl);
+ false -> ok
+ end,
+ read_from_disk(MsgLoc, State);
+ [{MsgId, Msg1, _CacheRefCount}] ->
+ {Msg1, State}
+ end,
+ gen_server2:reply(From, {ok, Msg}),
+ State1;
+ false -> [#file_summary { locked = Locked }] =
+ ets:lookup(FileSummaryEts, File),
+ case Locked of
+ true -> add_to_pending_gc_completion({read, MsgId, From},
+ File, State);
+ false -> {Msg, State1} = read_from_disk(MsgLoc, State),
+ gen_server2:reply(From, {ok, Msg}),
+ State1
+ end
+ end.
+
+read_from_disk(#msg_location { msg_id = MsgId, file = File, offset = Offset,
+ total_size = TotalSize }, State) ->
+ {Hdl, State1} = get_read_handle(File, State),
+ {ok, Offset} = file_handle_cache:position(Hdl, Offset),
+ {ok, {MsgId, Msg}} =
+ case rabbit_msg_file:read(Hdl, TotalSize) of
+ {ok, {MsgId, _}} = Obj ->
+ Obj;
+ Rest ->
+ {error, {misread, [{old_state, State},
+ {file_num, File},
+ {offset, Offset},
+ {msg_id, MsgId},
+ {read, Rest},
+ {proc_dict, get()}
+ ]}}
+ end,
+ {Msg, State1}.
+
+contains_message(MsgId, From,
+ State = #msstate { pending_gc_completion = Pending }) ->
+ case index_lookup_positive_ref_count(MsgId, State) of
+ not_found ->
+ gen_server2:reply(From, false),
+ State;
+ #msg_location { file = File } ->
+ case maps:is_key(File, Pending) of
+ true -> add_to_pending_gc_completion(
+ {contains, MsgId, From}, File, State);
+ false -> gen_server2:reply(From, true),
+ State
+ end
+ end.
+
+add_to_pending_gc_completion(
+ Op, File, State = #msstate { pending_gc_completion = Pending }) ->
+ State #msstate { pending_gc_completion =
+ rabbit_misc:maps_cons(File, Op, Pending) }.
+
+run_pending(Files, State) ->
+ lists:foldl(
+ fun (File, State1 = #msstate { pending_gc_completion = Pending }) ->
+ Pending1 = maps:remove(File, Pending),
+ lists:foldl(
+ fun run_pending_action/2,
+ State1 #msstate { pending_gc_completion = Pending1 },
+ lists:reverse(maps:get(File, Pending)))
+ end, State, Files).
+
+run_pending_action({read, MsgId, From}, State) ->
+ read_message(MsgId, From, State);
+run_pending_action({contains, MsgId, From}, State) ->
+ contains_message(MsgId, From, State);
+run_pending_action({remove, MsgId, CRef}, State) ->
+ remove_message(MsgId, CRef, State).
+
+safe_ets_update_counter(Tab, Key, UpdateOp, SuccessFun, FailThunk) ->
+ try
+ SuccessFun(ets:update_counter(Tab, Key, UpdateOp))
+ catch error:badarg -> FailThunk()
+ end.
+
+update_msg_cache(CacheEts, MsgId, Msg) ->
+ case ets:insert_new(CacheEts, {MsgId, Msg, 1}) of
+ true -> ok;
+ false -> safe_ets_update_counter(
+ CacheEts, MsgId, {3, +1}, fun (_) -> ok end,
+ fun () -> update_msg_cache(CacheEts, MsgId, Msg) end)
+ end.
+
+adjust_valid_total_size(File, Delta, State = #msstate {
+ sum_valid_data = SumValid,
+ file_summary_ets = FileSummaryEts }) ->
+ [_] = ets:update_counter(FileSummaryEts, File,
+ [{#file_summary.valid_total_size, Delta}]),
+ State #msstate { sum_valid_data = SumValid + Delta }.
+
+maps_store(Key, Val, Dict) ->
+ false = maps:is_key(Key, Dict),
+ maps:put(Key, Val, Dict).
+
+update_pending_confirms(Fun, CRef,
+ State = #msstate { clients = Clients,
+ cref_to_msg_ids = CTM }) ->
+ case maps:get(CRef, Clients) of
+ {_CPid, undefined, _CloseFDsFun} -> State;
+ {_CPid, MsgOnDiskFun, _CloseFDsFun} -> CTM1 = Fun(MsgOnDiskFun, CTM),
+ State #msstate {
+ cref_to_msg_ids = CTM1 }
+ end.
+
+record_pending_confirm(CRef, MsgId, State) ->
+ update_pending_confirms(
+ fun (_MsgOnDiskFun, CTM) ->
+ NewMsgIds = case maps:find(CRef, CTM) of
+ error -> gb_sets:singleton(MsgId);
+ {ok, MsgIds} -> gb_sets:add(MsgId, MsgIds)
+ end,
+ maps:put(CRef, NewMsgIds, CTM)
+ end, CRef, State).
+
+client_confirm(CRef, MsgIds, ActionTaken, State) ->
+ update_pending_confirms(
+ fun (MsgOnDiskFun, CTM) ->
+ case maps:find(CRef, CTM) of
+ {ok, Gs} -> MsgOnDiskFun(gb_sets:intersection(Gs, MsgIds),
+ ActionTaken),
+ MsgIds1 = rabbit_misc:gb_sets_difference(
+ Gs, MsgIds),
+ case gb_sets:is_empty(MsgIds1) of
+ true -> maps:remove(CRef, CTM);
+ false -> maps:put(CRef, MsgIds1, CTM)
+ end;
+ error -> CTM
+ end
+ end, CRef, State).
+
+blind_confirm(CRef, MsgIds, ActionTaken, State) ->
+ update_pending_confirms(
+ fun (MsgOnDiskFun, CTM) -> MsgOnDiskFun(MsgIds, ActionTaken), CTM end,
+ CRef, State).
+
+%% Detect whether the MsgId is older or younger than the client's death
+%% msg (if there is one). If the msg is older than the client death
+%% msg, and it has a 0 ref_count we must only alter the ref_count, not
+%% rewrite the msg - rewriting it would make it younger than the death
+%% msg and thus should be ignored. Note that this (correctly) returns
+%% false when testing to remove the death msg itself.
+should_mask_action(CRef, MsgId,
+ State = #msstate{dying_clients = DyingClients}) ->
+ case {maps:find(CRef, DyingClients), index_lookup(MsgId, State)} of
+ {error, Location} ->
+ {false, Location};
+ {{ok, _}, not_found} ->
+ {true, not_found};
+ {{ok, Client}, #msg_location { file = File, offset = Offset,
+ ref_count = RefCount } = Location} ->
+ #dying_client{file = DeathFile, offset = DeathOffset} = Client,
+ {case {{DeathFile, DeathOffset} < {File, Offset}, RefCount} of
+ {true, _} -> true;
+ {false, 0} -> false_if_increment;
+ {false, _} -> false
+ end, Location}
+ end.
+
+%%----------------------------------------------------------------------------
+%% file helper functions
+%%----------------------------------------------------------------------------
+
+open_file(File, Mode) ->
+ file_handle_cache:open_with_absolute_path(
+ File, ?BINARY_MODE ++ Mode,
+ [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE},
+ {read_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]).
+
+open_file(Dir, FileName, Mode) ->
+ open_file(form_filename(Dir, FileName), Mode).
+
+close_handle(Key, CState = #client_msstate { file_handle_cache = FHC }) ->
+ CState #client_msstate { file_handle_cache = close_handle(Key, FHC) };
+
+close_handle(Key, State = #msstate { file_handle_cache = FHC }) ->
+ State #msstate { file_handle_cache = close_handle(Key, FHC) };
+
+close_handle(Key, FHC) ->
+ case maps:find(Key, FHC) of
+ {ok, Hdl} -> ok = file_handle_cache:close(Hdl),
+ maps:remove(Key, FHC);
+ error -> FHC
+ end.
+
+mark_handle_open(FileHandlesEts, File, Ref) ->
+ %% This is fine to fail (already exists). Note it could fail with
+ %% the value being close, and not have it updated to open.
+ ets:insert_new(FileHandlesEts, {{Ref, File}, open}),
+ true.
+
+%% See comment in client_read3 - only call this when the file is locked
+mark_handle_to_close(ClientRefs, FileHandlesEts, File, Invoke) ->
+ [ begin
+ case (ets:update_element(FileHandlesEts, Key, {2, close})
+ andalso Invoke) of
+ true -> case maps:get(Ref, ClientRefs) of
+ {_CPid, _MsgOnDiskFun, undefined} ->
+ ok;
+ {_CPid, _MsgOnDiskFun, CloseFDsFun} ->
+ ok = CloseFDsFun()
+ end;
+ false -> ok
+ end
+ end || {{Ref, _File} = Key, open} <-
+ ets:match_object(FileHandlesEts, {{'_', File}, open}) ],
+ true.
+
+safe_file_delete_fun(File, Dir, FileHandlesEts) ->
+ fun () -> safe_file_delete(File, Dir, FileHandlesEts) end.
+
+safe_file_delete(File, Dir, FileHandlesEts) ->
+ %% do not match on any value - it's the absence of the row that
+ %% indicates the client has really closed the file.
+ case ets:match_object(FileHandlesEts, {{'_', File}, '_'}, 1) of
+ {[_|_], _Cont} -> false;
+ _ -> ok = file:delete(
+ form_filename(Dir, filenum_to_name(File))),
+ true
+ end.
+
+-spec close_all_indicated
+ (client_msstate()) -> rabbit_types:ok(client_msstate()).
+
+close_all_indicated(#client_msstate { file_handles_ets = FileHandlesEts,
+ client_ref = Ref } =
+ CState) ->
+ Objs = ets:match_object(FileHandlesEts, {{Ref, '_'}, close}),
+ {ok, lists:foldl(fun ({Key = {_Ref, File}, close}, CStateM) ->
+ true = ets:delete(FileHandlesEts, Key),
+ close_handle(File, CStateM)
+ end, CState, Objs)}.
+
+close_all_handles(CState = #client_msstate { file_handles_ets = FileHandlesEts,
+ file_handle_cache = FHC,
+ client_ref = Ref }) ->
+ ok = maps:fold(fun (File, Hdl, ok) ->
+ true = ets:delete(FileHandlesEts, {Ref, File}),
+ file_handle_cache:close(Hdl)
+ end, ok, FHC),
+ CState #client_msstate { file_handle_cache = #{} };
+
+close_all_handles(State = #msstate { file_handle_cache = FHC }) ->
+ ok = maps:fold(fun (_Key, Hdl, ok) -> file_handle_cache:close(Hdl) end,
+ ok, FHC),
+ State #msstate { file_handle_cache = #{} }.
+
+get_read_handle(FileNum, CState = #client_msstate { file_handle_cache = FHC,
+ dir = Dir }) ->
+ {Hdl, FHC2} = get_read_handle(FileNum, FHC, Dir),
+ {Hdl, CState #client_msstate { file_handle_cache = FHC2 }};
+
+get_read_handle(FileNum, State = #msstate { file_handle_cache = FHC,
+ dir = Dir }) ->
+ {Hdl, FHC2} = get_read_handle(FileNum, FHC, Dir),
+ {Hdl, State #msstate { file_handle_cache = FHC2 }}.
+
+get_read_handle(FileNum, FHC, Dir) ->
+ case maps:find(FileNum, FHC) of
+ {ok, Hdl} -> {Hdl, FHC};
+ error -> {ok, Hdl} = open_file(Dir, filenum_to_name(FileNum),
+ ?READ_MODE),
+ {Hdl, maps:put(FileNum, Hdl, FHC)}
+ end.
+
+preallocate(Hdl, FileSizeLimit, FinalPos) ->
+ {ok, FileSizeLimit} = file_handle_cache:position(Hdl, FileSizeLimit),
+ ok = file_handle_cache:truncate(Hdl),
+ {ok, FinalPos} = file_handle_cache:position(Hdl, FinalPos),
+ ok.
+
+truncate_and_extend_file(Hdl, Lowpoint, Highpoint) ->
+ {ok, Lowpoint} = file_handle_cache:position(Hdl, Lowpoint),
+ ok = file_handle_cache:truncate(Hdl),
+ ok = preallocate(Hdl, Highpoint, Lowpoint).
+
+form_filename(Dir, Name) -> filename:join(Dir, Name).
+
+filenum_to_name(File) -> integer_to_list(File) ++ ?FILE_EXTENSION.
+
+filename_to_num(FileName) -> list_to_integer(filename:rootname(FileName)).
+
+list_sorted_filenames(Dir, Ext) ->
+ lists:sort(fun (A, B) -> filename_to_num(A) < filename_to_num(B) end,
+ filelib:wildcard("*" ++ Ext, Dir)).
+
+%%----------------------------------------------------------------------------
+%% index
+%%----------------------------------------------------------------------------
+
+index_lookup_positive_ref_count(Key, State) ->
+ case index_lookup(Key, State) of
+ not_found -> not_found;
+ #msg_location { ref_count = 0 } -> not_found;
+ #msg_location {} = MsgLocation -> MsgLocation
+ end.
+
+index_update_ref_count(Key, RefCount, State) ->
+ index_update_fields(Key, {#msg_location.ref_count, RefCount}, State).
+
+index_lookup(Key, #gc_state { index_module = Index,
+ index_state = State }) ->
+ Index:lookup(Key, State);
+
+index_lookup(Key, #client_msstate { index_module = Index,
+ index_state = State }) ->
+ Index:lookup(Key, State);
+
+index_lookup(Key, #msstate { index_module = Index, index_state = State }) ->
+ Index:lookup(Key, State).
+
+index_insert(Obj, #msstate { index_module = Index, index_state = State }) ->
+ Index:insert(Obj, State).
+
+index_update(Obj, #msstate { index_module = Index, index_state = State }) ->
+ Index:update(Obj, State).
+
+index_update_fields(Key, Updates, #msstate{ index_module = Index,
+ index_state = State }) ->
+ Index:update_fields(Key, Updates, State);
+index_update_fields(Key, Updates, #gc_state{ index_module = Index,
+ index_state = State }) ->
+ Index:update_fields(Key, Updates, State).
+
+index_delete(Key, #msstate { index_module = Index, index_state = State }) ->
+ Index:delete(Key, State).
+
+index_delete_object(Obj, #gc_state{ index_module = Index,
+ index_state = State }) ->
+ Index:delete_object(Obj, State).
+
+index_clean_up_temporary_reference_count_entries(
+ #msstate { index_module = Index,
+ index_state = State }) ->
+ Index:clean_up_temporary_reference_count_entries_without_file(State).
+
+%%----------------------------------------------------------------------------
+%% shutdown and recovery
+%%----------------------------------------------------------------------------
+
+recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir, _Name) ->
+ {false, IndexModule:new(Dir), []};
+recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir, Name) ->
+ rabbit_log:warning("Message store ~tp: rebuilding indices from scratch~n", [Name]),
+ {false, IndexModule:new(Dir), []};
+recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Name) ->
+ Fresh = fun (ErrorMsg, ErrorArgs) ->
+ rabbit_log:warning("Message store ~tp : " ++ ErrorMsg ++ "~n"
+ "rebuilding indices from scratch~n",
+ [Name | ErrorArgs]),
+ {false, IndexModule:new(Dir), []}
+ end,
+ case read_recovery_terms(Dir) of
+ {false, Error} ->
+ Fresh("failed to read recovery terms: ~p", [Error]);
+ {true, Terms} ->
+ RecClientRefs = proplists:get_value(client_refs, Terms, []),
+ RecIndexModule = proplists:get_value(index_module, Terms),
+ case (lists:sort(ClientRefs) =:= lists:sort(RecClientRefs)
+ andalso IndexModule =:= RecIndexModule) of
+ true -> case IndexModule:recover(Dir) of
+ {ok, IndexState1} ->
+ {true, IndexState1, ClientRefs};
+ {error, Error} ->
+ Fresh("failed to recover index: ~p", [Error])
+ end;
+ false -> Fresh("recovery terms differ from present", [])
+ end
+ end.
+
+store_recovery_terms(Terms, Dir) ->
+ rabbit_file:write_term_file(filename:join(Dir, ?CLEAN_FILENAME), Terms).
+
+read_recovery_terms(Dir) ->
+ Path = filename:join(Dir, ?CLEAN_FILENAME),
+ case rabbit_file:read_term_file(Path) of
+ {ok, Terms} -> case file:delete(Path) of
+ ok -> {true, Terms};
+ {error, Error} -> {false, Error}
+ end;
+ {error, Error} -> {false, Error}
+ end.
+
+store_file_summary(Tid, Dir) ->
+ ets:tab2file(Tid, filename:join(Dir, ?FILE_SUMMARY_FILENAME),
+ [{extended_info, [object_count]}]).
+
+recover_file_summary(false, _Dir) ->
+ %% TODO: the only reason for this to be an *ordered*_set is so
+ %% that a) maybe_compact can start a traversal from the eldest
+ %% file, and b) build_index in fast recovery mode can easily
+ %% identify the current file. It's awkward to have both that
+ %% ordering and the left/right pointers in the entries - replacing
+ %% the former with some additional bit of state would be easy, but
+ %% ditching the latter would be neater.
+ {false, ets:new(rabbit_msg_store_file_summary,
+ [ordered_set, public, {keypos, #file_summary.file}])};
+recover_file_summary(true, Dir) ->
+ Path = filename:join(Dir, ?FILE_SUMMARY_FILENAME),
+ case ets:file2tab(Path) of
+ {ok, Tid} -> ok = file:delete(Path),
+ {true, Tid};
+ {error, _Error} -> recover_file_summary(false, Dir)
+ end.
+
+count_msg_refs(Gen, Seed, State) ->
+ case Gen(Seed) of
+ finished ->
+ ok;
+ {_MsgId, 0, Next} ->
+ count_msg_refs(Gen, Next, State);
+ {MsgId, Delta, Next} ->
+ ok = case index_lookup(MsgId, State) of
+ not_found ->
+ index_insert(#msg_location { msg_id = MsgId,
+ file = undefined,
+ ref_count = Delta },
+ State);
+ #msg_location { ref_count = RefCount } = StoreEntry ->
+ NewRefCount = RefCount + Delta,
+ case NewRefCount of
+ 0 -> index_delete(MsgId, State);
+ _ -> index_update(StoreEntry #msg_location {
+ ref_count = NewRefCount },
+ State)
+ end
+ end,
+ count_msg_refs(Gen, Next, State)
+ end.
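+
+%% Illustrative example (not part of the original module): the
+%% generator passed to count_msg_refs/3 yields ref-count deltas; e.g. a
+%% generator over a plain list could be
+%%
+%%   Gen = fun ([])                   -> finished;
+%%             ([{MsgId, Delta} | T]) -> {MsgId, Delta, T}
+%%         end,
+%%   ok = count_msg_refs(Gen, [{<<"m1">>, 2}, {<<"m2">>, 1}], State)
+%%
+%% which (for ids not already present) leaves index entries with
+%% file = undefined and ref_count 2 and 1 respectively.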
+
+recover_crashed_compactions(Dir) ->
+ FileNames = list_sorted_filenames(Dir, ?FILE_EXTENSION),
+ TmpFileNames = list_sorted_filenames(Dir, ?FILE_EXTENSION_TMP),
+ lists:foreach(
+ fun (TmpFileName) ->
+ NonTmpRelatedFileName =
+ filename:rootname(TmpFileName) ++ ?FILE_EXTENSION,
+ true = lists:member(NonTmpRelatedFileName, FileNames),
+ ok = recover_crashed_compaction(
+ Dir, TmpFileName, NonTmpRelatedFileName)
+ end, TmpFileNames),
+ TmpFileNames == [].
+
+recover_crashed_compaction(Dir, TmpFileName, NonTmpRelatedFileName) ->
+ %% Because a msg can legitimately appear multiple times in the
+ %% same file, identifying the contents of the tmp file and where
+ %% they came from is non-trivial. If we are recovering a crashed
+ %% compaction then we will be rebuilding the index, which can cope
+ %% with duplicates appearing. Thus the simplest and safest thing
+ %% to do is to append the contents of the tmp file to its main
+ %% file.
+ {ok, TmpHdl} = open_file(Dir, TmpFileName, ?READ_MODE),
+ {ok, MainHdl} = open_file(Dir, NonTmpRelatedFileName,
+ ?READ_MODE ++ ?WRITE_MODE),
+ {ok, _End} = file_handle_cache:position(MainHdl, eof),
+ Size = filelib:file_size(form_filename(Dir, TmpFileName)),
+ {ok, Size} = file_handle_cache:copy(TmpHdl, MainHdl, Size),
+ ok = file_handle_cache:close(MainHdl),
+ ok = file_handle_cache:delete(TmpHdl),
+ ok.
+
+scan_file_for_valid_messages(File) ->
+ case open_file(File, ?READ_MODE) of
+ {ok, Hdl} -> Valid = rabbit_msg_file:scan(
+ Hdl, filelib:file_size(File),
+ fun scan_fun/2, []),
+ ok = file_handle_cache:close(Hdl),
+ Valid;
+ {error, enoent} -> {ok, [], 0};
+ {error, Reason} -> {error, {unable_to_scan_file,
+ filename:basename(File),
+ Reason}}
+ end.
+
+scan_file_for_valid_messages(Dir, FileName) ->
+ scan_file_for_valid_messages(form_filename(Dir, FileName)).
+
+scan_fun({MsgId, TotalSize, Offset, _Msg}, Acc) ->
+ [{MsgId, TotalSize, Offset} | Acc].
+
+%% Takes the list in *ascending* offset order (i.e. eldest message
+%% first). This is the opposite of what scan_file_for_valid_messages
+%% produces, whose list has the youngest message first.
+drop_contiguous_block_prefix(L) -> drop_contiguous_block_prefix(L, 0).
+
+drop_contiguous_block_prefix([], ExpectedOffset) ->
+ {ExpectedOffset, []};
+drop_contiguous_block_prefix([#msg_location { offset = ExpectedOffset,
+ total_size = TotalSize } | Tail],
+ ExpectedOffset) ->
+ ExpectedOffset1 = ExpectedOffset + TotalSize,
+ drop_contiguous_block_prefix(Tail, ExpectedOffset1);
+drop_contiguous_block_prefix(MsgsAfterGap, ExpectedOffset) ->
+ {ExpectedOffset, MsgsAfterGap}.
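+
+%% Illustrative example (not part of the original module): given
+%% msg_locations at offset 0 (total_size 10), offset 10 (total_size 5)
+%% and offset 20 (total_size 7), the first two form a contiguous block
+%% starting at offset 0, so drop_contiguous_block_prefix/1 returns
+%% {15, [EntryAtOffset20]}: 15 is the first byte after the contiguous
+%% prefix and the remaining entries are those after the gap.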
+
+build_index(true, _StartupFunState,
+ State = #msstate { file_summary_ets = FileSummaryEts }) ->
+ ets:foldl(
+ fun (#file_summary { valid_total_size = ValidTotalSize,
+ file_size = FileSize,
+ file = File },
+ {_Offset, State1 = #msstate { sum_valid_data = SumValid,
+ sum_file_size = SumFileSize }}) ->
+ {FileSize, State1 #msstate {
+ sum_valid_data = SumValid + ValidTotalSize,
+ sum_file_size = SumFileSize + FileSize,
+ current_file = File }}
+ end, {0, State}, FileSummaryEts);
+build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
+ State = #msstate { dir = Dir }) ->
+ rabbit_log:debug("Rebuilding message refcount...~n", []),
+ ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State),
+ rabbit_log:debug("Done rebuilding message refcount~n", []),
+ {ok, Pid} = gatherer:start_link(),
+ case [filename_to_num(FileName) ||
+ FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of
+ [] -> rebuild_index(Pid, [State #msstate.current_file],
+ State);
+ Files -> {Offset, State1} = rebuild_index(Pid, Files, State),
+ {Offset, lists:foldl(fun delete_file_if_empty/2,
+ State1, Files)}
+ end.
+
+build_index_worker(Gatherer, State = #msstate { dir = Dir },
+ Left, File, Files) ->
+ FileName = filenum_to_name(File),
+ rabbit_log:debug("Rebuilding message location index from ~p (~B file(s) remaining)~n",
+ [form_filename(Dir, FileName), length(Files)]),
+ {ok, Messages, FileSize} =
+ scan_file_for_valid_messages(Dir, FileName),
+ {ValidMessages, ValidTotalSize} =
+ lists:foldl(
+ fun (Obj = {MsgId, TotalSize, Offset}, {VMAcc, VTSAcc}) ->
+ case index_lookup(MsgId, State) of
+ #msg_location { file = undefined } = StoreEntry ->
+ ok = index_update(StoreEntry #msg_location {
+ file = File, offset = Offset,
+ total_size = TotalSize },
+ State),
+ {[Obj | VMAcc], VTSAcc + TotalSize};
+ _ ->
+ {VMAcc, VTSAcc}
+ end
+ end, {[], 0}, Messages),
+ {Right, FileSize1} =
+ case Files of
+ %% if it's the last file, we'll truncate to remove any
+ %% rubbish above the last valid message. This affects the
+ %% file size.
+ [] -> {undefined, case ValidMessages of
+ [] -> 0;
+ _ -> {_MsgId, TotalSize, Offset} =
+ lists:last(ValidMessages),
+ Offset + TotalSize
+ end};
+ [F|_] -> {F, FileSize}
+ end,
+ ok = gatherer:in(Gatherer, #file_summary {
+ file = File,
+ valid_total_size = ValidTotalSize,
+ left = Left,
+ right = Right,
+ file_size = FileSize1,
+ locked = false,
+ readers = 0 }),
+ ok = gatherer:finish(Gatherer).
+
+enqueue_build_index_workers(_Gatherer, _Left, [], _State) ->
+ exit(normal);
+enqueue_build_index_workers(Gatherer, Left, [File|Files], State) ->
+ ok = worker_pool:dispatch_sync(
+ fun () ->
+ link(Gatherer),
+ ok = build_index_worker(Gatherer, State,
+ Left, File, Files),
+ unlink(Gatherer),
+ ok
+ end),
+ enqueue_build_index_workers(Gatherer, File, Files, State).
+
+reduce_index(Gatherer, LastFile,
+ State = #msstate { file_summary_ets = FileSummaryEts,
+ sum_valid_data = SumValid,
+ sum_file_size = SumFileSize }) ->
+ case gatherer:out(Gatherer) of
+ empty ->
+ ok = gatherer:stop(Gatherer),
+ ok = index_clean_up_temporary_reference_count_entries(State),
+ Offset = case ets:lookup(FileSummaryEts, LastFile) of
+ [] -> 0;
+ [#file_summary { file_size = FileSize }] -> FileSize
+ end,
+ {Offset, State #msstate { current_file = LastFile }};
+ {value, #file_summary { valid_total_size = ValidTotalSize,
+ file_size = FileSize } = FileSummary} ->
+ true = ets:insert_new(FileSummaryEts, FileSummary),
+ reduce_index(Gatherer, LastFile,
+ State #msstate {
+ sum_valid_data = SumValid + ValidTotalSize,
+ sum_file_size = SumFileSize + FileSize })
+ end.
+
+rebuild_index(Gatherer, Files, State) ->
+ lists:foreach(fun (_File) ->
+ ok = gatherer:fork(Gatherer)
+ end, Files),
+ Pid = spawn(
+ fun () ->
+ enqueue_build_index_workers(Gatherer, undefined,
+ Files, State)
+ end),
+ erlang:monitor(process, Pid),
+ reduce_index(Gatherer, lists:last(Files), State).
+
+%%----------------------------------------------------------------------------
+%% garbage collection / compaction / aggregation -- internal
+%%----------------------------------------------------------------------------
+
+maybe_roll_to_new_file(
+ Offset,
+ State = #msstate { dir = Dir,
+ current_file_handle = CurHdl,
+ current_file = CurFile,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ file_size_limit = FileSizeLimit })
+ when Offset >= FileSizeLimit ->
+ State1 = internal_sync(State),
+ ok = file_handle_cache:close(CurHdl),
+ NextFile = CurFile + 1,
+ {ok, NextHdl} = open_file(Dir, filenum_to_name(NextFile), ?WRITE_MODE),
+ true = ets:insert_new(FileSummaryEts, #file_summary {
+ file = NextFile,
+ valid_total_size = 0,
+ left = CurFile,
+ right = undefined,
+ file_size = 0,
+ locked = false,
+ readers = 0 }),
+ true = ets:update_element(FileSummaryEts, CurFile,
+ {#file_summary.right, NextFile}),
+ true = ets:match_delete(CurFileCacheEts, {'_', '_', 0}),
+ maybe_compact(State1 #msstate { current_file_handle = NextHdl,
+ current_file = NextFile });
+maybe_roll_to_new_file(_, State) ->
+ State.
+
+maybe_compact(State = #msstate { sum_valid_data = SumValid,
+ sum_file_size = SumFileSize,
+ gc_pid = GCPid,
+ pending_gc_completion = Pending,
+ file_summary_ets = FileSummaryEts,
+ file_size_limit = FileSizeLimit })
+ when SumFileSize > 2 * FileSizeLimit andalso
+ (SumFileSize - SumValid) / SumFileSize > ?GARBAGE_FRACTION ->
+ %% TODO: the algorithm here is sub-optimal - it may result in a
+ %% complete traversal of FileSummaryEts.
+ First = ets:first(FileSummaryEts),
+ case First =:= '$end_of_table' orelse
+ maps:size(Pending) >= ?MAXIMUM_SIMULTANEOUS_GC_FILES of
+ true ->
+ State;
+ false ->
+ case find_files_to_combine(FileSummaryEts, FileSizeLimit,
+ ets:lookup(FileSummaryEts, First)) of
+ not_found ->
+ State;
+ {Src, Dst} ->
+ Pending1 = maps_store(Dst, [],
+ maps_store(Src, [], Pending)),
+ State1 = close_handle(Src, close_handle(Dst, State)),
+ true = ets:update_element(FileSummaryEts, Src,
+ {#file_summary.locked, true}),
+ true = ets:update_element(FileSummaryEts, Dst,
+ {#file_summary.locked, true}),
+ ok = rabbit_msg_store_gc:combine(GCPid, Src, Dst),
+ State1 #msstate { pending_gc_completion = Pending1 }
+ end
+ end;
+maybe_compact(State) ->
+ State.
+
+find_files_to_combine(FileSummaryEts, FileSizeLimit,
+ [#file_summary { file = Dst,
+ valid_total_size = DstValid,
+ right = Src,
+ locked = DstLocked }]) ->
+ case Src of
+ undefined ->
+ not_found;
+ _ ->
+ [#file_summary { file = Src,
+ valid_total_size = SrcValid,
+ left = Dst,
+ right = SrcRight,
+ locked = SrcLocked }] = Next =
+ ets:lookup(FileSummaryEts, Src),
+ case SrcRight of
+ undefined -> not_found;
+ _ -> case (DstValid + SrcValid =< FileSizeLimit) andalso
+ (DstValid > 0) andalso (SrcValid > 0) andalso
+ not (DstLocked orelse SrcLocked) of
+ true -> {Src, Dst};
+ false -> find_files_to_combine(
+ FileSummaryEts, FileSizeLimit, Next)
+ end
+ end
+ end.
+
+delete_file_if_empty(File, State = #msstate { current_file = File }) ->
+ State;
+delete_file_if_empty(File, State = #msstate {
+ gc_pid = GCPid,
+ file_summary_ets = FileSummaryEts,
+ pending_gc_completion = Pending }) ->
+ [#file_summary { valid_total_size = ValidData,
+ locked = false }] =
+ ets:lookup(FileSummaryEts, File),
+ case ValidData of
+ %% don't delete the file_summary_ets entry for File here
+ %% because we could have readers which need to be able to
+ %% decrement the readers count.
+ 0 -> true = ets:update_element(FileSummaryEts, File,
+ {#file_summary.locked, true}),
+ ok = rabbit_msg_store_gc:delete(GCPid, File),
+ Pending1 = maps_store(File, [], Pending),
+ close_handle(File,
+ State #msstate { pending_gc_completion = Pending1 });
+ _ -> State
+ end.
+
+cleanup_after_file_deletion(File,
+ #msstate { file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ clients = Clients }) ->
+ %% Ensure that any clients that have open fhs to the file close
+ %% them before using them again. This has to be done here, in the
+ %% msg_store rather than in the GC, and not when the GC starts:
+ %% if it were done at GC start, a client could find the close
+ %% marker, then close and reopen the fh whilst the GC is still
+ %% waiting for readers to disappear, i.e. before the GC has
+ %% actually been done.
+ true = mark_handle_to_close(Clients, FileHandlesEts, File, true),
+ [#file_summary { left = Left,
+ right = Right,
+ locked = true,
+ readers = 0 }] = ets:lookup(FileSummaryEts, File),
+ %% We'll never delete the current file, so right is never undefined
+ true = Right =/= undefined, %% ASSERTION
+ true = ets:update_element(FileSummaryEts, Right,
+ {#file_summary.left, Left}),
+ %% ensure the double linked list is maintained
+ true = case Left of
+ undefined -> true; %% File is the eldest file (left-most)
+ _ -> ets:update_element(FileSummaryEts, Left,
+ {#file_summary.right, Right})
+ end,
+ true = ets:delete(FileSummaryEts, File),
+ ok.
+
+%%----------------------------------------------------------------------------
+%% garbage collection / compaction / aggregation -- external
+%%----------------------------------------------------------------------------
+
+-spec combine_files(non_neg_integer(), non_neg_integer(), gc_state()) ->
+ {ok, deletion_thunk()} | {defer, [non_neg_integer()]}.
+
+combine_files(Source, Destination,
+ State = #gc_state { file_summary_ets = FileSummaryEts }) ->
+ [#file_summary{locked = true} = SourceSummary] =
+ ets:lookup(FileSummaryEts, Source),
+
+ [#file_summary{locked = true} = DestinationSummary] =
+ ets:lookup(FileSummaryEts, Destination),
+
+ case {SourceSummary, DestinationSummary} of
+ {#file_summary{readers = 0}, #file_summary{readers = 0}} ->
+ {ok, do_combine_files(SourceSummary, DestinationSummary,
+ Source, Destination, State)};
+ _ ->
+ rabbit_log:debug("Asked to combine files ~p and ~p but they have active readers. Deferring.",
+ [Source, Destination]),
+ DeferredFiles = [FileSummary#file_summary.file
+ || FileSummary <- [SourceSummary, DestinationSummary],
+ FileSummary#file_summary.readers /= 0],
+ {defer, DeferredFiles}
+ end.
+
+do_combine_files(SourceSummary, DestinationSummary,
+ Source, Destination,
+ State = #gc_state { file_summary_ets = FileSummaryEts,
+ file_handles_ets = FileHandlesEts,
+ dir = Dir,
+ msg_store = Server }) ->
+ #file_summary {
+ readers = 0,
+ left = Destination,
+ valid_total_size = SourceValid,
+ file_size = SourceFileSize,
+ locked = true } = SourceSummary,
+ #file_summary {
+ readers = 0,
+ right = Source,
+ valid_total_size = DestinationValid,
+ file_size = DestinationFileSize,
+ locked = true } = DestinationSummary,
+
+ SourceName = filenum_to_name(Source),
+ DestinationName = filenum_to_name(Destination),
+ {ok, SourceHdl} = open_file(Dir, SourceName,
+ ?READ_AHEAD_MODE),
+ {ok, DestinationHdl} = open_file(Dir, DestinationName,
+ ?READ_AHEAD_MODE ++ ?WRITE_MODE),
+ TotalValidData = SourceValid + DestinationValid,
+ %% If DestinationValid =:= DestinationContiguousTop then we don't
+ %% need a tmp file: we just truncate straight away and copy over
+ %% from Source.
+ %% If they're not equal, we need to write everything past
+ %% DestinationContiguousTop out to a tmp file, truncate, copy it
+ %% back in, and then copy over from Source.
+ {DestinationWorkList, DestinationValid} =
+ load_and_vacuum_message_file(Destination, State),
+ {DestinationContiguousTop, DestinationWorkListTail} =
+ drop_contiguous_block_prefix(DestinationWorkList),
+ case DestinationWorkListTail of
+ [] -> ok = truncate_and_extend_file(
+ DestinationHdl, DestinationContiguousTop, TotalValidData);
+ _ -> Tmp = filename:rootname(DestinationName) ++ ?FILE_EXTENSION_TMP,
+ {ok, TmpHdl} = open_file(Dir, Tmp, ?READ_AHEAD_MODE++?WRITE_MODE),
+ ok = copy_messages(
+ DestinationWorkListTail, DestinationContiguousTop,
+ DestinationValid, DestinationHdl, TmpHdl, Destination,
+ State),
+ TmpSize = DestinationValid - DestinationContiguousTop,
+ %% so now Tmp contains everything we need to salvage
+ %% from Destination, and index_state has been updated to
+ %% reflect the compaction of Destination so truncate
+ %% Destination and copy from Tmp back to the end
+ {ok, 0} = file_handle_cache:position(TmpHdl, 0),
+ ok = truncate_and_extend_file(
+ DestinationHdl, DestinationContiguousTop, TotalValidData),
+ {ok, TmpSize} =
+ file_handle_cache:copy(TmpHdl, DestinationHdl, TmpSize),
+ %% position in DestinationHdl should now be DestinationValid
+ ok = file_handle_cache:sync(DestinationHdl),
+ ok = file_handle_cache:delete(TmpHdl)
+ end,
+ {SourceWorkList, SourceValid} = load_and_vacuum_message_file(Source, State),
+ ok = copy_messages(SourceWorkList, DestinationValid, TotalValidData,
+ SourceHdl, DestinationHdl, Destination, State),
+ %% tidy up
+ ok = file_handle_cache:close(DestinationHdl),
+ ok = file_handle_cache:close(SourceHdl),
+
+ %% don't update dest.right, because it could be changing at the
+ %% same time
+ true = ets:update_element(
+ FileSummaryEts, Destination,
+ [{#file_summary.valid_total_size, TotalValidData},
+ {#file_summary.file_size, TotalValidData}]),
+
+ Reclaimed = SourceFileSize + DestinationFileSize - TotalValidData,
+ rabbit_log:debug("Combined segment files number ~p (source) and ~p (destination), reclaimed ~p bytes",
+ [Source, Destination, Reclaimed]),
+ gen_server2:cast(Server, {combine_files, Source, Destination, Reclaimed}),
+ safe_file_delete_fun(Source, Dir, FileHandlesEts).
+
+-spec delete_file(non_neg_integer(), gc_state()) -> {ok, deletion_thunk()} | {defer, [non_neg_integer()]}.
+
+delete_file(File, State = #gc_state { file_summary_ets = FileSummaryEts,
+ file_handles_ets = FileHandlesEts,
+ dir = Dir,
+ msg_store = Server }) ->
+ case ets:lookup(FileSummaryEts, File) of
+ [#file_summary { valid_total_size = 0,
+ locked = true,
+ file_size = FileSize,
+ readers = 0 }] ->
+ {[], 0} = load_and_vacuum_message_file(File, State),
+ gen_server2:cast(Server, {delete_file, File, FileSize}),
+ {ok, safe_file_delete_fun(File, Dir, FileHandlesEts)};
+ [#file_summary{readers = Readers}] when Readers > 0 ->
+ rabbit_log:debug("Asked to delete file ~p but it has active readers. Deferring.",
+ [File]),
+ {defer, [File]}
+ end.
+
+load_and_vacuum_message_file(File, State = #gc_state { dir = Dir }) ->
+ %% Messages come back with the one nearest end-of-file at the
+ %% head of the list
+ {ok, Messages, _FileSize} =
+ scan_file_for_valid_messages(Dir, filenum_to_name(File)),
+ %% foldl will reverse so will end up with msgs in ascending offset order
+ lists:foldl(
+ fun ({MsgId, TotalSize, Offset}, Acc = {List, Size}) ->
+ case index_lookup(MsgId, State) of
+ #msg_location { file = File, total_size = TotalSize,
+ offset = Offset, ref_count = 0 } = Entry ->
+ ok = index_delete_object(Entry, State),
+ Acc;
+ #msg_location { file = File, total_size = TotalSize,
+ offset = Offset } = Entry ->
+ {[ Entry | List ], TotalSize + Size};
+ _ ->
+ Acc
+ end
+ end, {[], 0}, Messages).
+
+copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl,
+ Destination, State) ->
+ Copy = fun ({BlockStart, BlockEnd}) ->
+ BSize = BlockEnd - BlockStart,
+ {ok, BlockStart} =
+ file_handle_cache:position(SourceHdl, BlockStart),
+ {ok, BSize} =
+ file_handle_cache:copy(SourceHdl, DestinationHdl, BSize)
+ end,
+ case
+ lists:foldl(
+ fun (#msg_location { msg_id = MsgId, offset = Offset,
+ total_size = TotalSize },
+ {CurOffset, Block = {BlockStart, BlockEnd}}) ->
+ %% CurOffset is in the DestinationFile.
+ %% Offset, BlockStart and BlockEnd are in the SourceFile
+ %% update MsgLocation to reflect change of file and offset
+ ok = index_update_fields(MsgId,
+ [{#msg_location.file, Destination},
+ {#msg_location.offset, CurOffset}],
+ State),
+ {CurOffset + TotalSize,
+ case BlockEnd of
+ undefined ->
+ %% base case, called only for the first list elem
+ {Offset, Offset + TotalSize};
+ Offset ->
+ %% extend the current block because the
+ %% next msg follows straight on
+ {BlockStart, BlockEnd + TotalSize};
+ _ ->
+ %% found a gap, so actually do the work for
+ %% the previous block
+ Copy(Block),
+ {Offset, Offset + TotalSize}
+ end}
+ end, {InitOffset, {undefined, undefined}}, WorkList) of
+ {FinalOffset, Block} ->
+ case WorkList of
+ [] -> ok;
+ _ -> Copy(Block), %% do the last remaining block
+ ok = file_handle_cache:sync(DestinationHdl)
+ end;
+ {FinalOffsetZ, _Block} ->
+ {gc_error, [{expected, FinalOffset},
+ {got, FinalOffsetZ},
+ {destination, Destination}]}
+ end.
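+
+%% Illustrative example (not part of the original module): if the work
+%% list holds source entries at offsets 100 (size 20), 120 (size 30)
+%% and 200 (size 10), the first two coalesce into the block {100, 150}
+%% and are copied with a single file_handle_cache:copy/3 call; the gap
+%% at offset 200 starts a new block {200, 210}, which is copied by the
+%% final Copy(Block) after the fold.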
+
+-spec force_recovery(file:filename(), server()) -> 'ok'.
+
+force_recovery(BaseDir, Store) ->
+ Dir = filename:join(BaseDir, atom_to_list(Store)),
+ case file:delete(filename:join(Dir, ?CLEAN_FILENAME)) of
+ ok -> ok;
+ {error, enoent} -> ok
+ end,
+ recover_crashed_compactions(BaseDir),
+ ok.
+
+foreach_file(D, Fun, Files) ->
+ [ok = Fun(filename:join(D, File)) || File <- Files].
+
+foreach_file(D1, D2, Fun, Files) ->
+ [ok = Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files].
+
+-spec transform_dir(file:filename(), server(),
+ fun ((any()) -> (rabbit_types:ok_or_error2(msg(), any())))) -> 'ok'.
+
+transform_dir(BaseDir, Store, TransformFun) ->
+ Dir = filename:join(BaseDir, atom_to_list(Store)),
+ TmpDir = filename:join(Dir, ?TRANSFORM_TMP),
+ TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end,
+ CopyFile = fun (Src, Dst) -> {ok, _Bytes} = file:copy(Src, Dst), ok end,
+ case filelib:is_dir(TmpDir) of
+ true -> throw({error, transform_failed_previously});
+ false -> FileList = list_sorted_filenames(Dir, ?FILE_EXTENSION),
+ foreach_file(Dir, TmpDir, TransformFile, FileList),
+ foreach_file(Dir, fun file:delete/1, FileList),
+ foreach_file(TmpDir, Dir, CopyFile, FileList),
+ foreach_file(TmpDir, fun file:delete/1, FileList),
+ ok = file:del_dir(TmpDir)
+ end.
+
+transform_msg_file(FileOld, FileNew, TransformFun) ->
+ ok = rabbit_file:ensure_parent_dirs_exist(FileNew),
+ {ok, RefOld} = file_handle_cache:open_with_absolute_path(
+ FileOld, [raw, binary, read], []),
+ {ok, RefNew} = file_handle_cache:open_with_absolute_path(
+ FileNew, [raw, binary, write],
+ [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]),
+ {ok, _Acc, _IgnoreSize} =
+ rabbit_msg_file:scan(
+ RefOld, filelib:file_size(FileOld),
+ fun({MsgId, _Size, _Offset, BinMsg}, ok) ->
+ {ok, MsgNew} = case binary_to_term(BinMsg) of
+ <<>> -> {ok, <<>>}; %% dying client marker
+ Msg -> TransformFun(Msg)
+ end,
+ {ok, _} = rabbit_msg_file:append(RefNew, MsgId, MsgNew),
+ ok
+ end, ok),
+ ok = file_handle_cache:close(RefOld),
+ ok = file_handle_cache:close(RefNew),
+ ok.
diff --git a/deps/rabbit/src/rabbit_msg_store_ets_index.erl b/deps/rabbit/src/rabbit_msg_store_ets_index.erl
new file mode 100644
index 0000000000..294417b5ba
--- /dev/null
+++ b/deps/rabbit/src/rabbit_msg_store_ets_index.erl
@@ -0,0 +1,76 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_msg_store_ets_index).
+
+-include("rabbit_msg_store.hrl").
+
+-behaviour(rabbit_msg_store_index).
+
+-export([new/1, recover/1,
+ lookup/2, insert/2, update/2, update_fields/3, delete/2,
+ delete_object/2, clean_up_temporary_reference_count_entries_without_file/1, terminate/1]).
+
+-define(MSG_LOC_NAME, rabbit_msg_store_ets_index).
+-define(FILENAME, "msg_store_index.ets").
+
+-record(state, { table, dir }).
+
+new(Dir) ->
+ file:delete(filename:join(Dir, ?FILENAME)),
+ Tid = ets:new(?MSG_LOC_NAME, [set, public, {keypos, #msg_location.msg_id}]),
+ #state { table = Tid, dir = Dir }.
+
+recover(Dir) ->
+ Path = filename:join(Dir, ?FILENAME),
+ case ets:file2tab(Path) of
+ {ok, Tid} -> file:delete(Path),
+ {ok, #state { table = Tid, dir = Dir }};
+ Error -> Error
+ end.
+
+lookup(Key, State) ->
+ case ets:lookup(State #state.table, Key) of
+ [] -> not_found;
+ [Entry] -> Entry
+ end.
+
+insert(Obj, State) ->
+ true = ets:insert_new(State #state.table, Obj),
+ ok.
+
+update(Obj, State) ->
+ true = ets:insert(State #state.table, Obj),
+ ok.
+
+update_fields(Key, Updates, State) ->
+ true = ets:update_element(State #state.table, Key, Updates),
+ ok.
+
+delete(Key, State) ->
+ true = ets:delete(State #state.table, Key),
+ ok.
+
+delete_object(Obj, State) ->
+ true = ets:delete_object(State #state.table, Obj),
+ ok.
+
+clean_up_temporary_reference_count_entries_without_file(State) ->
+ MatchHead = #msg_location { file = undefined, _ = '_' },
+ ets:select_delete(State #state.table, [{MatchHead, [], [true]}]),
+ ok.
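+
+%% Illustrative note (not part of the original module): the match spec
+%% [{#msg_location{file = undefined, _ = '_'}, [], [true]}] makes
+%% ets:select_delete/2 remove every entry whose file field is still
+%% 'undefined', i.e. the temporary reference-count-only entries created
+%% while the index is being rebuilt.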
+
+terminate(#state { table = MsgLocations, dir = Dir }) ->
+ case ets:tab2file(MsgLocations, filename:join(Dir, ?FILENAME),
+ [{extended_info, [object_count]}]) of
+ ok -> ok;
+ {error, Err} ->
+ rabbit_log:error("Unable to save message store index"
+ " for directory ~p.~nError: ~p~n",
+ [Dir, Err])
+ end,
+ ets:delete(MsgLocations).
diff --git a/deps/rabbit/src/rabbit_msg_store_gc.erl b/deps/rabbit/src/rabbit_msg_store_gc.erl
new file mode 100644
index 0000000000..41addc5fa6
--- /dev/null
+++ b/deps/rabbit/src/rabbit_msg_store_gc.erl
@@ -0,0 +1,125 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_msg_store_gc).
+
+-behaviour(gen_server2).
+
+-export([start_link/1, combine/3, delete/2, no_readers/2, stop/1]).
+
+-export([set_maximum_since_use/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, prioritise_cast/3]).
+
+-record(state,
+ { pending_no_readers,
+ on_action,
+ msg_store_state
+ }).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(rabbit_msg_store:gc_state()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(MsgStoreState) ->
+ gen_server2:start_link(?MODULE, [MsgStoreState],
+ [{timeout, infinity}]).
+
+-spec combine(pid(), rabbit_msg_store:file_num(),
+ rabbit_msg_store:file_num()) -> 'ok'.
+
+combine(Server, Source, Destination) ->
+ gen_server2:cast(Server, {combine, Source, Destination}).
+
+-spec delete(pid(), rabbit_msg_store:file_num()) -> 'ok'.
+
+delete(Server, File) ->
+ gen_server2:cast(Server, {delete, File}).
+
+-spec no_readers(pid(), rabbit_msg_store:file_num()) -> 'ok'.
+
+no_readers(Server, File) ->
+ gen_server2:cast(Server, {no_readers, File}).
+
+-spec stop(pid()) -> 'ok'.
+
+stop(Server) ->
+ gen_server2:call(Server, stop, infinity).
+
+-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'.
+
+set_maximum_since_use(Pid, Age) ->
+ gen_server2:cast(Pid, {set_maximum_since_use, Age}).
+
+%%----------------------------------------------------------------------------
+
+init([MsgStoreState]) ->
+ ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
+ [self()]),
+ {ok, #state { pending_no_readers = #{},
+ on_action = [],
+ msg_store_state = MsgStoreState }, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8;
+prioritise_cast(_Msg, _Len, _State) -> 0.
+
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State}.
+
+handle_cast({combine, Source, Destination}, State) ->
+ {noreply, attempt_action(combine, [Source, Destination], State), hibernate};
+
+handle_cast({delete, File}, State) ->
+ {noreply, attempt_action(delete, [File], State), hibernate};
+
+handle_cast({no_readers, File},
+ State = #state { pending_no_readers = Pending }) ->
+ {noreply, case maps:find(File, Pending) of
+ error ->
+ State;
+ {ok, {Action, Files}} ->
+ Pending1 = maps:remove(File, Pending),
+ attempt_action(
+ Action, Files,
+ State #state { pending_no_readers = Pending1 })
+ end, hibernate};
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ {noreply, State, hibernate}.
+
+handle_info(Info, State) ->
+ {stop, {unhandled_info, Info}, State}.
+
+terminate(_Reason, State) ->
+ State.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+attempt_action(Action, Files,
+ State = #state { pending_no_readers = Pending,
+ on_action = Thunks,
+ msg_store_state = MsgStoreState }) ->
+ case do_action(Action, Files, MsgStoreState) of
+ {ok, OkThunk} ->
+ State#state{on_action = lists:filter(fun (Thunk) -> not Thunk() end,
+ [OkThunk | Thunks])};
+ {defer, [File | _]} ->
+ Pending1 = maps:put(File, {Action, Files}, Pending),
+ State #state { pending_no_readers = Pending1 }
+ end.
+
+do_action(combine, [Source, Destination], MsgStoreState) ->
+ rabbit_msg_store:combine_files(Source, Destination, MsgStoreState);
+do_action(delete, [File], MsgStoreState) ->
+ rabbit_msg_store:delete_file(File, MsgStoreState).
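+
+%% Illustrative example (not part of the original module): if
+%% rabbit_msg_store:combine_files/3 returns {defer, [Src]} because Src
+%% still has readers, attempt_action/3 records
+%% Src => {combine, [Src, Dst]} in pending_no_readers; when the store
+%% later casts {no_readers, Src}, the pending combine is retried via
+%% attempt_action/3.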
diff --git a/deps/rabbit/src/rabbit_networking.erl b/deps/rabbit/src/rabbit_networking.erl
new file mode 100644
index 0000000000..433b1d7540
--- /dev/null
+++ b/deps/rabbit/src/rabbit_networking.erl
@@ -0,0 +1,663 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_networking).
+
+%% This module contains various functions that deal with networking,
+%% TCP and TLS listeners, and connection information.
+%%
+%% It also contains a boot step — boot/0 — that starts networking machinery.
+%% This module primarily covers AMQP 0-9-1 but some bits are reused in
+%% plugins that provide protocol support, e.g. STOMP or MQTT.
+%%
+%% Functions in this module take care of normalising TCP listener options,
+%% including dual IP stack cases, and starting the AMQP 0-9-1 listener(s).
+%%
+%% See also tcp_listener_sup and tcp_listener.
+
+-export([boot/0, start_tcp_listener/2, start_ssl_listener/3,
+ stop_tcp_listener/1, on_node_down/1, active_listeners/0,
+ node_listeners/1, node_client_listeners/1,
+ register_connection/1, unregister_connection/1,
+ register_non_amqp_connection/1, unregister_non_amqp_connection/1,
+ connections/0, non_amqp_connections/0, connection_info_keys/0,
+ connection_info/1, connection_info/2,
+ connection_info_all/0, connection_info_all/1,
+ emit_connection_info_all/4, emit_connection_info_local/3,
+ close_connection/2, close_connections/2, close_all_connections/1,
+ force_connection_event_refresh/1, force_non_amqp_connection_event_refresh/1,
+ handshake/2, tcp_host/1,
+ ranch_ref/1, ranch_ref/2, ranch_ref_of_protocol/1,
+ listener_of_protocol/1, stop_ranch_listener_of_protocol/1]).
+
+%% Used by TCP-based transports, e.g. STOMP adapter
+-export([tcp_listener_addresses/1, tcp_listener_spec/9,
+ ensure_ssl/0, fix_ssl_options/1, poodle_check/1]).
+
+-export([tcp_listener_started/4, tcp_listener_stopped/4]).
+
+-deprecated([{force_connection_event_refresh, 1, eventually}]).
+
+-export([
+ local_connections/0,
+ local_non_amqp_connections/0,
+ %% prefer local_connections/0
+ connections_local/0
+]).
+
+-include("rabbit.hrl").
+-include("rabbit_misc.hrl").
+
+%% IANA-suggested ephemeral port range is 49152 to 65535
+-define(FIRST_TEST_BIND_PORT, 49152).
+
+%%----------------------------------------------------------------------------
+
+-export_type([ip_port/0, hostname/0]).
+
+-type hostname() :: rabbit_net:hostname().
+-type ip_port() :: rabbit_net:ip_port().
+
+-type family() :: atom().
+-type listener_config() :: ip_port() |
+ {hostname(), ip_port()} |
+ {hostname(), ip_port(), family()}.
+-type address() :: {inet:ip_address(), ip_port(), family()}.
+-type name_prefix() :: atom().
+-type protocol() :: atom().
+-type label() :: string().
+
+-spec boot() -> 'ok' | no_return().
+
+boot() ->
+ ok = record_distribution_listener(),
+ _ = application:start(ranch),
+ rabbit_log:debug("Started Ranch"),
+ %% Failures will throw exceptions
+ _ = boot_listeners(fun boot_tcp/1, application:get_env(rabbit, num_tcp_acceptors, 10), "TCP"),
+ _ = boot_listeners(fun boot_tls/1, application:get_env(rabbit, num_ssl_acceptors, 10), "TLS"),
+ ok.
+
+boot_listeners(Fun, NumAcceptors, Type) ->
+ case Fun(NumAcceptors) of
+ ok ->
+ ok;
+ {error, {could_not_start_listener, Address, Port, Details}} = Error ->
+ rabbit_log:error("Failed to start ~s listener [~s]:~p, error: ~p",
+ [Type, Address, Port, Details]),
+ throw(Error)
+ end.
+
+boot_tcp(NumAcceptors) ->
+ {ok, TcpListeners} = application:get_env(tcp_listeners),
+ case lists:foldl(fun(Listener, ok) ->
+ start_tcp_listener(Listener, NumAcceptors);
+ (_Listener, Error) ->
+ Error
+ end,
+ ok, TcpListeners) of
+ ok -> ok;
+ {error, _} = Error -> Error
+ end.
+
+boot_tls(NumAcceptors) ->
+ case application:get_env(ssl_listeners) of
+ {ok, []} ->
+ ok;
+ {ok, SslListeners} ->
+ SslOpts = ensure_ssl(),
+ case poodle_check('AMQP') of
+ ok -> [start_ssl_listener(L, SslOpts, NumAcceptors) || L <- SslListeners];
+ danger -> ok
+ end,
+ ok
+ end.
+
+-spec ensure_ssl() -> rabbit_types:infos().
+
+ensure_ssl() ->
+ {ok, SslAppsConfig} = application:get_env(rabbit, ssl_apps),
+ ok = app_utils:start_applications(SslAppsConfig),
+ {ok, SslOptsConfig0} = application:get_env(rabbit, ssl_options),
+ rabbit_ssl_options:fix(SslOptsConfig0).
+
+-spec poodle_check(atom()) -> 'ok' | 'danger'.
+
+poodle_check(Context) ->
+ {ok, Vsn} = application:get_key(ssl, vsn),
+ case rabbit_misc:version_compare(Vsn, "5.3", gte) of %% R16B01
+ true -> ok;
+ false -> case application:get_env(rabbit, ssl_allow_poodle_attack) of
+ {ok, true} -> ok;
+ _ -> log_poodle_fail(Context),
+ danger
+ end
+ end.
+
+log_poodle_fail(Context) ->
+ rabbit_log:error(
+ "The installed version of Erlang (~s) contains the bug OTP-10905,~n"
+ "which makes it impossible to disable SSLv3. This makes the system~n"
+ "vulnerable to the POODLE attack. SSL listeners for ~s have therefore~n"
+ "been disabled.~n~n"
+ "You are advised to upgrade to a recent Erlang version; R16B01 is the~n"
+ "first version in which this bug is fixed, but later is usually~n"
+ "better.~n~n"
+ "If you cannot upgrade now and want to re-enable SSL listeners, you can~n"
+ "set the config item 'ssl_allow_poodle_attack' to 'true' in the~n"
+ "'rabbit' section of your configuration file.~n",
+ [rabbit_misc:otp_release(), Context]).
+
+fix_ssl_options(Config) ->
+ rabbit_ssl_options:fix(Config).
+
+-spec tcp_listener_addresses(listener_config()) -> [address()].
+
+tcp_listener_addresses(Port) when is_integer(Port) ->
+ tcp_listener_addresses_auto(Port);
+tcp_listener_addresses({"auto", Port}) ->
+ %% Variant to prevent lots of hacking around in bash and batch files
+ tcp_listener_addresses_auto(Port);
+tcp_listener_addresses({Host, Port}) ->
+ %% auto: determine family IPv4 / IPv6 after converting to IP address
+ tcp_listener_addresses({Host, Port, auto});
+tcp_listener_addresses({Host, Port, Family0})
+ when is_integer(Port) andalso (Port >= 0) andalso (Port =< 65535) ->
+ [{IPAddress, Port, Family} ||
+ {IPAddress, Family} <- getaddr(Host, Family0)];
+tcp_listener_addresses({_Host, Port, _Family0}) ->
+ rabbit_log:error("invalid port ~p - not 0..65535~n", [Port]),
+ throw({error, {invalid_port, Port}}).
+
+tcp_listener_addresses_auto(Port) ->
+ lists:append([tcp_listener_addresses(Listener) ||
+ Listener <- port_to_listeners(Port)]).
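+
+%% Illustrative example (not part of the original module):
+%%
+%%   tcp_listener_addresses({"127.0.0.1", 5672})
+%%       -> [{{127,0,0,1}, 5672, inet}]
+%%
+%% whereas a bare port such as 5672 is expanded via port_to_listeners/1
+%% into one or two wildcard addresses, depending on the host's
+%% IPv4/IPv6 stack (see the comment above port_to_listeners/1 below).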
+
+-spec tcp_listener_spec
+ (name_prefix(), address(), [gen_tcp:listen_option()], module(), module(),
+ any(), protocol(), non_neg_integer(), label()) ->
+ supervisor:child_spec().
+
+tcp_listener_spec(NamePrefix, {IPAddress, Port, Family}, SocketOpts,
+ Transport, ProtoSup, ProtoOpts, Protocol, NumAcceptors, Label) ->
+ Args = [IPAddress, Port, Transport, [Family | SocketOpts], ProtoSup, ProtoOpts,
+ {?MODULE, tcp_listener_started, [Protocol, SocketOpts]},
+ {?MODULE, tcp_listener_stopped, [Protocol, SocketOpts]},
+ NumAcceptors, Label],
+ {rabbit_misc:tcp_name(NamePrefix, IPAddress, Port),
+ {tcp_listener_sup, start_link, Args},
+ transient, infinity, supervisor, [tcp_listener_sup]}.
+
+-spec ranch_ref(#listener{} | [{atom(), any()}] | 'undefined') -> ranch:ref() | undefined.
+ranch_ref(#listener{port = Port}) ->
+ [{IPAddress, Port, _Family} | _] = tcp_listener_addresses(Port),
+ {acceptor, IPAddress, Port};
+ranch_ref(Listener) when is_list(Listener) ->
+ Port = rabbit_misc:pget(port, Listener),
+ [{IPAddress, Port, _Family} | _] = tcp_listener_addresses(Port),
+ {acceptor, IPAddress, Port};
+ranch_ref(undefined) ->
+ undefined.
+
+-spec ranch_ref(inet:ip_address(), ip_port()) -> ranch:ref().
+
+%% Returns a reference that identifies a TCP listener in Ranch.
+ranch_ref(IPAddress, Port) ->
+ {acceptor, IPAddress, Port}.
+
+-spec ranch_ref_of_protocol(atom()) -> ranch:ref() | undefined.
+ranch_ref_of_protocol(Protocol) ->
+ ranch_ref(listener_of_protocol(Protocol)).
+
+-spec listener_of_protocol(atom()) -> #listener{}.
+listener_of_protocol(Protocol) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ MatchSpec = #listener{
+ node = node(),
+ protocol = Protocol,
+ _ = '_'
+ },
+ case mnesia:match_object(rabbit_listener, MatchSpec, read) of
+ [] -> undefined;
+ [Row] -> Row
+ end
+ end).
+
+-spec stop_ranch_listener_of_protocol(atom()) -> ok | {error, not_found}.
+stop_ranch_listener_of_protocol(Protocol) ->
+ case rabbit_networking:ranch_ref_of_protocol(Protocol) of
+ undefined -> ok;
+ Ref ->
+ rabbit_log:debug("Stopping Ranch listener for protocol ~s", [Protocol]),
+ ranch:stop_listener(Ref)
+ end.
+
+-spec start_tcp_listener(
+ listener_config(), integer()) -> 'ok' | {'error', term()}.
+
+start_tcp_listener(Listener, NumAcceptors) ->
+ start_listener(Listener, NumAcceptors, amqp, "TCP listener", tcp_opts()).
+
+-spec start_ssl_listener(
+ listener_config(), rabbit_types:infos(), integer()) -> 'ok' | {'error', term()}.
+
+start_ssl_listener(Listener, SslOpts, NumAcceptors) ->
+ start_listener(Listener, NumAcceptors, 'amqp/ssl', "TLS (SSL) listener", tcp_opts() ++ SslOpts).
+
+
+-spec start_listener(
+ listener_config(), integer(), protocol(), label(), list()) -> 'ok' | {'error', term()}.
+start_listener(Listener, NumAcceptors, Protocol, Label, Opts) ->
+ lists:foldl(fun (Address, ok) ->
+ start_listener0(Address, NumAcceptors, Protocol, Label, Opts);
+ (_Address, {error, _} = Error) ->
+ Error
+ end, ok, tcp_listener_addresses(Listener)).
+
+start_listener0(Address, NumAcceptors, Protocol, Label, Opts) ->
+ Transport = transport(Protocol),
+ Spec = tcp_listener_spec(rabbit_tcp_listener_sup, Address, Opts,
+ Transport, rabbit_connection_sup, [], Protocol,
+ NumAcceptors, Label),
+ case supervisor:start_child(rabbit_sup, Spec) of
+ {ok, _} -> ok;
+ {error, {{shutdown, {failed_to_start_child, _,
+ {shutdown, {failed_to_start_child, _,
+ {listen_error, _, PosixError}}}}}, _}} ->
+ {IPAddress, Port, _Family} = Address,
+ {error, {could_not_start_listener, rabbit_misc:ntoa(IPAddress), Port, PosixError}};
+ {error, Other} ->
+ {IPAddress, Port, _Family} = Address,
+ {error, {could_not_start_listener, rabbit_misc:ntoa(IPAddress), Port, Other}}
+ end.
+
+transport(Protocol) ->
+ case Protocol of
+ amqp -> ranch_tcp;
+ 'amqp/ssl' -> ranch_ssl
+ end.
+
+-spec stop_tcp_listener(listener_config()) -> 'ok'.
+
+stop_tcp_listener(Listener) ->
+ [stop_tcp_listener0(Address) ||
+ Address <- tcp_listener_addresses(Listener)],
+ ok.
+
+stop_tcp_listener0({IPAddress, Port, _Family}) ->
+ Name = rabbit_misc:tcp_name(rabbit_tcp_listener_sup, IPAddress, Port),
+ ok = supervisor:terminate_child(rabbit_sup, Name),
+ ok = supervisor:delete_child(rabbit_sup, Name).
+
+-spec tcp_listener_started
+ (_, _,
+ string() |
+ {byte(),byte(),byte(),byte()} |
+ {char(),char(),char(),char(),char(),char(),char(),char()}, _) ->
+ 'ok'.
+
+tcp_listener_started(Protocol, Opts, IPAddress, Port) ->
+ %% We need the ip to distinguish e.g. 0.0.0.0 and 127.0.0.1
+ %% We need the host so we can distinguish multiple instances of the above
+ %% in a cluster.
+ ok = mnesia:dirty_write(
+ rabbit_listener,
+ #listener{node = node(),
+ protocol = Protocol,
+ host = tcp_host(IPAddress),
+ ip_address = IPAddress,
+ port = Port,
+ opts = Opts}).
+
+-spec tcp_listener_stopped
+ (_, _,
+ string() |
+ {byte(),byte(),byte(),byte()} |
+ {char(),char(),char(),char(),char(),char(),char(),char()},
+ _) ->
+ 'ok'.
+
+tcp_listener_stopped(Protocol, Opts, IPAddress, Port) ->
+ ok = mnesia:dirty_delete_object(
+ rabbit_listener,
+ #listener{node = node(),
+ protocol = Protocol,
+ host = tcp_host(IPAddress),
+ ip_address = IPAddress,
+ port = Port,
+ opts = Opts}).
+
+-spec record_distribution_listener() -> ok | no_return().
+
+record_distribution_listener() ->
+ {Name, Host} = rabbit_nodes:parts(node()),
+ case erl_epmd:port_please(list_to_atom(Name), Host, infinity) of
+ {port, Port, _Version} ->
+ tcp_listener_started(clustering, [], {0,0,0,0,0,0,0,0}, Port);
+ noport ->
+ throw({error, no_epmd_port})
+ end.
+
+-spec active_listeners() -> [rabbit_types:listener()].
+
+active_listeners() ->
+ rabbit_misc:dirty_read_all(rabbit_listener).
+
+-spec node_listeners(node()) -> [rabbit_types:listener()].
+
+node_listeners(Node) ->
+ mnesia:dirty_read(rabbit_listener, Node).
+
+-spec node_client_listeners(node()) -> [rabbit_types:listener()].
+
+node_client_listeners(Node) ->
+ case node_listeners(Node) of
+ [] -> [];
+ Xs ->
+ lists:filter(fun (#listener{protocol = clustering}) -> false;
+ (_) -> true
+ end, Xs)
+ end.
+
+-spec on_node_down(node()) -> 'ok'.
+
+on_node_down(Node) ->
+ case lists:member(Node, nodes()) of
+ false ->
+ rabbit_log:info(
+ "Node ~s is down, deleting its listeners~n", [Node]),
+ ok = mnesia:dirty_delete(rabbit_listener, Node);
+ true ->
+ rabbit_log:info(
+ "Keeping ~s listeners: the node is already back~n", [Node])
+ end.
+
+-spec register_connection(pid()) -> ok.
+
+register_connection(Pid) -> pg_local:join(rabbit_connections, Pid).
+
+-spec unregister_connection(pid()) -> ok.
+
+unregister_connection(Pid) -> pg_local:leave(rabbit_connections, Pid).
+
+-spec connections() -> [rabbit_types:connection()].
+
+connections() ->
+ Nodes = rabbit_nodes:all_running(),
+ rabbit_misc:append_rpc_all_nodes(Nodes, rabbit_networking, connections_local, [], ?RPC_TIMEOUT).
+
+-spec local_connections() -> [rabbit_types:connection()].
+%% @doc Returns pids of AMQP 0-9-1 and AMQP 1.0 connections local to this node.
+local_connections() ->
+ connections_local().
+
+-spec connections_local() -> [rabbit_types:connection()].
+%% @deprecated Prefer {@link local_connections}
+connections_local() -> pg_local:get_members(rabbit_connections).
+
+-spec register_non_amqp_connection(pid()) -> ok.
+
+register_non_amqp_connection(Pid) -> pg_local:join(rabbit_non_amqp_connections, Pid).
+
+-spec unregister_non_amqp_connection(pid()) -> ok.
+
+unregister_non_amqp_connection(Pid) -> pg_local:leave(rabbit_non_amqp_connections, Pid).
+
+-spec non_amqp_connections() -> [rabbit_types:connection()].
+
+non_amqp_connections() ->
+ Nodes = rabbit_nodes:all_running(),
+ rabbit_misc:append_rpc_all_nodes(Nodes, rabbit_networking, local_non_amqp_connections, [], ?RPC_TIMEOUT).
+
+-spec local_non_amqp_connections() -> [rabbit_types:connection()].
+local_non_amqp_connections() ->
+ pg_local:get_members(rabbit_non_amqp_connections).
+
+-spec connection_info_keys() -> rabbit_types:info_keys().
+
+connection_info_keys() -> rabbit_reader:info_keys().
+
+-spec connection_info(rabbit_types:connection()) -> rabbit_types:infos().
+
+connection_info(Pid) -> rabbit_reader:info(Pid).
+
+-spec connection_info(rabbit_types:connection(), rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+
+connection_info(Pid, Items) -> rabbit_reader:info(Pid, Items).
+
+-spec connection_info_all() -> [rabbit_types:infos()].
+
+connection_info_all() -> cmap(fun (Q) -> connection_info(Q) end).
+
+-spec connection_info_all(rabbit_types:info_keys()) ->
+ [rabbit_types:infos()].
+
+connection_info_all(Items) -> cmap(fun (Q) -> connection_info(Q, Items) end).
+
+emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) ->
+ Pids = [ spawn_link(Node, rabbit_networking, emit_connection_info_local, [Items, Ref, AggregatorPid]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids),
+ ok.
+
+emit_connection_info_local(Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(Q) -> connection_info(Q, Items) end,
+ connections_local()).
+
+-spec close_connection(pid(), string()) -> 'ok'.
+
+close_connection(Pid, Explanation) ->
+ case lists:member(Pid, connections()) of
+ true ->
+ Res = rabbit_reader:shutdown(Pid, Explanation),
+ rabbit_log:info("Closing connection ~p because ~p~n", [Pid, Explanation]),
+ Res;
+ false ->
+ rabbit_log:warning("Asked to close connection ~p (reason: ~p) "
+ "but no running cluster node reported it as an active connection. Was it already closed? ~n",
+ [Pid, Explanation]),
+ ok
+ end.
+
+-spec close_connections([pid()], string()) -> 'ok'.
+close_connections(Pids, Explanation) ->
+ [close_connection(Pid, Explanation) || Pid <- Pids],
+ ok.
+
+%% Meant to be used by tests only
+-spec close_all_connections(string()) -> 'ok'.
+close_all_connections(Explanation) ->
+ Pids = connections(),
+ [close_connection(Pid, Explanation) || Pid <- Pids],
+ ok.
+
+-spec force_connection_event_refresh(reference()) -> 'ok'.
+force_connection_event_refresh(Ref) ->
+ [rabbit_reader:force_event_refresh(C, Ref) || C <- connections()],
+ ok.
+
+-spec force_non_amqp_connection_event_refresh(reference()) -> 'ok'.
+force_non_amqp_connection_event_refresh(Ref) ->
+ [gen_server:cast(Pid, {force_event_refresh, Ref}) || Pid <- non_amqp_connections()],
+ ok.
+
+-spec failed_to_recv_proxy_header(_, _) -> no_return().
+failed_to_recv_proxy_header(Ref, Error) ->
+ Msg = case Error of
+ closed -> "error when receiving proxy header: TCP socket was ~p prematurely";
+ _Other -> "error when receiving proxy header: ~p"
+ end,
+ rabbit_log:debug(Msg, [Error]),
+ % The following call will clean up resources then exit
+ _ = ranch:handshake(Ref),
+ exit({shutdown, failed_to_recv_proxy_header}).
+
+handshake(Ref, ProxyProtocolEnabled) ->
+ case ProxyProtocolEnabled of
+ true ->
+ case ranch:recv_proxy_header(Ref, 3000) of
+ {error, Error} ->
+ failed_to_recv_proxy_header(Ref, Error);
+ {error, protocol_error, Error} ->
+ failed_to_recv_proxy_header(Ref, Error);
+ {ok, ProxyInfo} ->
+ {ok, Sock} = ranch:handshake(Ref),
+ setup_socket(Sock),
+ {ok, {rabbit_proxy_socket, Sock, ProxyInfo}}
+ end;
+ false ->
+ {ok, Sock} = ranch:handshake(Ref),
+ setup_socket(Sock),
+ {ok, Sock}
+ end.
+
+setup_socket(Sock) ->
+ ok = tune_buffer_size(Sock),
+ ok = file_handle_cache:obtain().
+
+tune_buffer_size(Sock) ->
+ case tune_buffer_size1(Sock) of
+ ok -> ok;
+ {error, _} -> rabbit_net:fast_close(Sock),
+ exit(normal)
+ end.
+
+tune_buffer_size1(Sock) ->
+ case rabbit_net:getopts(Sock, [sndbuf, recbuf, buffer]) of
+ {ok, BufSizes} -> BufSz = lists:max([Sz || {_Opt, Sz} <- BufSizes]),
+ rabbit_net:setopts(Sock, [{buffer, BufSz}]);
+ Error -> Error
+ end.
+
+%%--------------------------------------------------------------------
+
+tcp_host(IPAddress) ->
+ rabbit_net:tcp_host(IPAddress).
+
+cmap(F) -> rabbit_misc:filter_exit_map(F, connections()).
+
+tcp_opts() ->
+ {ok, ConfigOpts} = application:get_env(rabbit, tcp_listen_options),
+ ConfigOpts.
+
+%% inet_parse:address takes care of IP strings, like "0.0.0.0".
+%% inet:getaddr returns immediately for an IP tuple such as {0,0,0,0},
+%% and runs the 'inet_gethost' port process for DNS lookups.
+%% On Windows inet:getaddr runs the DNS resolver for IP strings, which may fail.
+getaddr(Host, Family) ->
+ case inet_parse:address(Host) of
+ {ok, IPAddress} -> [{IPAddress, resolve_family(IPAddress, Family)}];
+ {error, _} -> gethostaddr(Host, Family)
+ end.
+
+gethostaddr(Host, auto) ->
+ Lookups = [{Family, inet:getaddr(Host, Family)} || Family <- [inet, inet6]],
+ case [{IP, Family} || {Family, {ok, IP}} <- Lookups] of
+ [] -> host_lookup_error(Host, Lookups);
+ IPs -> IPs
+ end;
+
+gethostaddr(Host, Family) ->
+ case inet:getaddr(Host, Family) of
+ {ok, IPAddress} -> [{IPAddress, Family}];
+ {error, Reason} -> host_lookup_error(Host, Reason)
+ end.
+
+-spec host_lookup_error(_, _) -> no_return().
+host_lookup_error(Host, Reason) ->
+ rabbit_log:error("invalid host ~p - ~p~n", [Host, Reason]),
+ throw({error, {invalid_host, Host, Reason}}).
+
+resolve_family({_,_,_,_}, auto) -> inet;
+resolve_family({_,_,_,_,_,_,_,_}, auto) -> inet6;
+resolve_family(IP, auto) -> throw({error, {strange_family, IP}});
+resolve_family(_, F) -> F.
+
+%%--------------------------------------------------------------------
+
+%% There are three kinds of machine (for our purposes).
+%%
+%% * Those which treat IPv4 addresses as a special kind of IPv6 address
+%% ("Single stack")
+%% - Linux by default, Windows Vista and later
+%% - We also treat any (hypothetical?) IPv6-only machine the same way
+%% * Those which consider IPv6 and IPv4 to be completely separate things
+%% ("Dual stack")
+%% - OpenBSD, Windows XP / 2003, Linux if so configured
+%% * Those which do not support IPv6.
+%% - Ancient/weird OSes, Linux if so configured
+%%
+%% How to reconfigure Linux to test this:
+%% Single stack (default):
+%% echo 0 > /proc/sys/net/ipv6/bindv6only
+%% Dual stack:
+%% echo 1 > /proc/sys/net/ipv6/bindv6only
+%% IPv4 only:
+%% add ipv6.disable=1 to GRUB_CMDLINE_LINUX_DEFAULT in /etc/default/grub then
+%% sudo update-grub && sudo reboot
+%%
+%% This matters in (and only in) the case where the sysadmin (or the
+%% app descriptor) has only supplied a port and we wish to bind to
+%% "all addresses". This means different things depending on whether
+%% we're single or dual stack. On single stack binding to "::"
+%% implicitly includes all IPv4 addresses, and subsequently attempting
+%% to bind to "0.0.0.0" will fail. On dual stack, binding to "::" will
+%% only bind to IPv6 addresses, and we need another listener bound to
+%% "0.0.0.0" for IPv4. Finally, on IPv4-only systems we of course only
+%% want to bind to "0.0.0.0".
+%%
+%% Unfortunately it seems there is no way to detect single vs dual stack
+%% apart from attempting to bind to the port.
+port_to_listeners(Port) ->
+ IPv4 = {"0.0.0.0", Port, inet},
+ IPv6 = {"::", Port, inet6},
+ case ipv6_status(?FIRST_TEST_BIND_PORT) of
+ single_stack -> [IPv6];
+ ipv6_only -> [IPv6];
+ dual_stack -> [IPv6, IPv4];
+ ipv4_only -> [IPv4]
+ end.
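+
+%% Illustrative example (not part of the original module): with only a
+%% port configured, port_to_listeners(5672) yields
+%%
+%%   [{"::", 5672, inet6}]                          on single-stack (and
+%%                                                  IPv6-only) hosts,
+%%   [{"::", 5672, inet6}, {"0.0.0.0", 5672, inet}] on dual-stack hosts,
+%%   [{"0.0.0.0", 5672, inet}]                      on IPv4-only hosts.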
+
+ipv6_status(TestPort) ->
+ IPv4 = [inet, {ip, {0,0,0,0}}],
+ IPv6 = [inet6, {ip, {0,0,0,0,0,0,0,0}}],
+ case gen_tcp:listen(TestPort, IPv6) of
+ {ok, LSock6} ->
+ case gen_tcp:listen(TestPort, IPv4) of
+ {ok, LSock4} ->
+ %% Dual stack
+ gen_tcp:close(LSock6),
+ gen_tcp:close(LSock4),
+ dual_stack;
+ %% Checking the error here would only let us
+ %% distinguish single stack IPv6 / IPv4 vs IPv6 only,
+ %% which we figure out below anyway.
+ {error, _} ->
+ gen_tcp:close(LSock6),
+ case gen_tcp:listen(TestPort, IPv4) of
+ %% Single stack
+ {ok, LSock4} -> gen_tcp:close(LSock4),
+ single_stack;
+ %% IPv6-only machine. Welcome to the future.
+ {error, eafnosupport} -> ipv6_only; %% Linux
+ {error, eprotonosupport}-> ipv6_only; %% FreeBSD
+ %% Dual stack machine with something already
+ %% on IPv4.
+ {error, _} -> ipv6_status(TestPort + 1)
+ end
+ end;
+ %% IPv4-only machine. Welcome to the 90s.
+ {error, eafnosupport} -> %% Linux
+ ipv4_only;
+ {error, eprotonosupport} -> %% FreeBSD
+ ipv4_only;
+ %% Port in use
+ {error, _} ->
+ ipv6_status(TestPort + 1)
+ end.
diff --git a/deps/rabbit/src/rabbit_node_monitor.erl b/deps/rabbit/src/rabbit_node_monitor.erl
new file mode 100644
index 0000000000..b56180c54c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_node_monitor.erl
@@ -0,0 +1,926 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_node_monitor).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([running_nodes_filename/0,
+ cluster_status_filename/0, quorum_filename/0, default_quorum_filename/0,
+ prepare_cluster_status_files/0,
+ write_cluster_status/1, read_cluster_status/0,
+ update_cluster_status/0, reset_cluster_status/0]).
+-export([notify_node_up/0, notify_joined_cluster/0, notify_left_cluster/1]).
+-export([partitions/0, partitions/1, status/1, subscribe/1]).
+-export([pause_partition_guard/0]).
+-export([global_sync/0]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+ %% Utils
+-export([all_rabbit_nodes_up/0, run_outside_applications/2, ping_all/0,
+ alive_nodes/1, alive_rabbit_nodes/1]).
+
+-define(SERVER, ?MODULE).
+-define(NODE_REPLY_TIMEOUT, 5000).
+-define(RABBIT_UP_RPC_TIMEOUT, 2000).
+-define(RABBIT_DOWN_PING_INTERVAL, 1000).
+
+-record(state, {monitors, partitions, subscribers, down_ping_timer,
+ keepalive_timer, autoheal, guid, node_guids}).
+
+%%----------------------------------------------------------------------------
+%% Start
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%%----------------------------------------------------------------------------
+%% Cluster file operations
+%%----------------------------------------------------------------------------
+
+%% The cluster file information is kept in two files. The "cluster
+%% status file" contains all the clustered nodes and the disc nodes.
+%% The "running nodes file" contains the currently running nodes or
+%% the running nodes at shutdown when the node is down.
+%%
+%% We strive to keep the files up to date and we rely on this
+%% assumption in various situations. Obviously when mnesia is offline
+%% the information we have will be outdated, but it cannot be
+%% otherwise.
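+%%
+%% For illustration only (node names are made up), both files hold plain
+%% Erlang terms, e.g.:
+%%   cluster_nodes.config:       {['rabbit@a','rabbit@b'], ['rabbit@a']}.
+%%   nodes_running_at_shutdown:  ['rabbit@a','rabbit@b'].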
+
+-spec running_nodes_filename() -> string().
+
+running_nodes_filename() ->
+ filename:join(rabbit_mnesia:dir(), "nodes_running_at_shutdown").
+
+-spec cluster_status_filename() -> string().
+
+cluster_status_filename() ->
+ filename:join(rabbit_mnesia:dir(), "cluster_nodes.config").
+
+quorum_filename() ->
+ ra_env:data_dir().
+
+default_quorum_filename() ->
+ filename:join(rabbit_mnesia:dir(), "quorum").
+
+-spec prepare_cluster_status_files() -> 'ok' | no_return().
+
+prepare_cluster_status_files() ->
+ rabbit_mnesia:ensure_mnesia_dir(),
+ RunningNodes1 = case try_read_file(running_nodes_filename()) of
+ {ok, [Nodes]} when is_list(Nodes) -> Nodes;
+ {ok, Other} -> corrupt_cluster_status_files(Other);
+ {error, enoent} -> []
+ end,
+ ThisNode = [node()],
+ %% The running nodes file might contain a set or a list, in the case
+ %% of the legacy file format
+ RunningNodes2 = lists:usort(ThisNode ++ RunningNodes1),
+ {AllNodes1, DiscNodes} =
+ case try_read_file(cluster_status_filename()) of
+ {ok, [{AllNodes, DiscNodes0}]} ->
+ {AllNodes, DiscNodes0};
+ {ok, [AllNodes0]} when is_list(AllNodes0) ->
+ {legacy_cluster_nodes(AllNodes0), legacy_disc_nodes(AllNodes0)};
+ {ok, Files} ->
+ corrupt_cluster_status_files(Files);
+ {error, enoent} ->
+ LegacyNodes = legacy_cluster_nodes([]),
+ {LegacyNodes, LegacyNodes}
+ end,
+ AllNodes2 = lists:usort(AllNodes1 ++ RunningNodes2),
+ ok = write_cluster_status({AllNodes2, DiscNodes, RunningNodes2}).
+
+-spec corrupt_cluster_status_files(any()) -> no_return().
+
+corrupt_cluster_status_files(F) ->
+ throw({error, corrupt_cluster_status_files, F}).
+
+-spec write_cluster_status(rabbit_mnesia:cluster_status()) -> 'ok'.
+
+write_cluster_status({All, Disc, Running}) ->
+ ClusterStatusFN = cluster_status_filename(),
+ Res = case rabbit_file:write_term_file(ClusterStatusFN, [{All, Disc}]) of
+ ok ->
+ RunningNodesFN = running_nodes_filename(),
+ {RunningNodesFN,
+ rabbit_file:write_term_file(RunningNodesFN, [Running])};
+ E1 = {error, _} ->
+ {ClusterStatusFN, E1}
+ end,
+ case Res of
+ {_, ok} -> ok;
+ {FN, {error, E2}} -> throw({error, {could_not_write_file, FN, E2}})
+ end.
+
+-spec read_cluster_status() -> rabbit_mnesia:cluster_status().
+
+read_cluster_status() ->
+ case {try_read_file(cluster_status_filename()),
+ try_read_file(running_nodes_filename())} of
+ {{ok, [{All, Disc}]}, {ok, [Running]}} when is_list(Running) ->
+ {All, Disc, Running};
+ {Stat, Run} ->
+ throw({error, {corrupt_or_missing_cluster_files, Stat, Run}})
+ end.
+
+-spec update_cluster_status() -> 'ok'.
+
+update_cluster_status() ->
+ {ok, Status} = rabbit_mnesia:cluster_status_from_mnesia(),
+ write_cluster_status(Status).
+
+-spec reset_cluster_status() -> 'ok'.
+
+reset_cluster_status() ->
+ write_cluster_status({[node()], [node()], [node()]}).
+
+%%----------------------------------------------------------------------------
+%% Cluster notifications
+%%----------------------------------------------------------------------------
+
+-spec notify_node_up() -> 'ok'.
+
+notify_node_up() ->
+ gen_server:cast(?SERVER, notify_node_up).
+
+-spec notify_joined_cluster() -> 'ok'.
+
+notify_joined_cluster() ->
+ Nodes = rabbit_nodes:all_running() -- [node()],
+ gen_server:abcast(Nodes, ?SERVER,
+ {joined_cluster, node(), rabbit_mnesia:node_type()}),
+ ok.
+
+-spec notify_left_cluster(node()) -> 'ok'.
+
+notify_left_cluster(Node) ->
+ Nodes = rabbit_nodes:all_running(),
+ gen_server:abcast(Nodes, ?SERVER, {left_cluster, Node}),
+ ok.
+
+%%----------------------------------------------------------------------------
+%% Server calls
+%%----------------------------------------------------------------------------
+
+-spec partitions() -> [node()].
+
+partitions() ->
+ gen_server:call(?SERVER, partitions, infinity).
+
+-spec partitions([node()]) -> [{node(), [node()]}].
+
+partitions(Nodes) ->
+ {Replies, _} = gen_server:multi_call(Nodes, ?SERVER, partitions, ?NODE_REPLY_TIMEOUT),
+ Replies.
+
+-spec status([node()]) -> {[{node(), [node()]}], [node()]}.
+
+status(Nodes) ->
+ gen_server:multi_call(Nodes, ?SERVER, status, infinity).
+
+-spec subscribe(pid()) -> 'ok'.
+
+subscribe(Pid) ->
+ gen_server:cast(?SERVER, {subscribe, Pid}).
+
+%%----------------------------------------------------------------------------
+%% pause_minority/pause_if_all_down safety
+%%----------------------------------------------------------------------------
+
+%% If we are in a minority and pause_minority mode then a) we are
+%% going to shut down imminently and b) we should not confirm anything
+%% until then, since anything we confirm is likely to be lost.
+%%
+%% The same principles apply to a node which isn't part of the preferred
+%% partition when we are in pause_if_all_down mode.
+%%
+%% We could confirm something by having an HA queue see the pausing
+%% state (and fail over into it) before the node monitor stops us, or
+%% by using unmirrored queues and just having them vanish (and
+%% confirming messages as thrown away).
+%%
+%% So we have channels call in here before issuing confirms, to do a
+%% lightweight check that we have not entered a pausing state.
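+%%
+%% A minimal sketch of the intended call pattern from a channel (the
+%% send_confirms/1 helper is hypothetical):
+%%
+%%   case rabbit_node_monitor:pause_partition_guard() of
+%%       ok      -> send_confirms(Confirms);
+%%       pausing -> ok  %% about to pause; anything confirmed now may be lost
+%%   end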
+
+-spec pause_partition_guard() -> 'ok' | 'pausing'.
+
+pause_partition_guard() ->
+ case get(pause_partition_guard) of
+ not_pause_mode ->
+ ok;
+ undefined ->
+ {ok, M} = application:get_env(rabbit, cluster_partition_handling),
+ case M of
+ pause_minority ->
+ pause_minority_guard([], ok);
+ {pause_if_all_down, PreferredNodes, _} ->
+ pause_if_all_down_guard(PreferredNodes, [], ok);
+ _ ->
+ put(pause_partition_guard, not_pause_mode),
+ ok
+ end;
+ {minority_mode, Nodes, LastState} ->
+ pause_minority_guard(Nodes, LastState);
+ {pause_if_all_down_mode, PreferredNodes, Nodes, LastState} ->
+ pause_if_all_down_guard(PreferredNodes, Nodes, LastState)
+ end.
+
+pause_minority_guard(LastNodes, LastState) ->
+ case nodes() of
+ LastNodes -> LastState;
+ _ -> NewState = case majority() of
+ false -> pausing;
+ true -> ok
+ end,
+ put(pause_partition_guard,
+ {minority_mode, nodes(), NewState}),
+ NewState
+ end.
+
+pause_if_all_down_guard(PreferredNodes, LastNodes, LastState) ->
+ case nodes() of
+ LastNodes -> LastState;
+ _ -> NewState = case in_preferred_partition(PreferredNodes) of
+ false -> pausing;
+ true -> ok
+ end,
+ put(pause_partition_guard,
+ {pause_if_all_down_mode, PreferredNodes, nodes(),
+ NewState}),
+ NewState
+ end.
+
+%%----------------------------------------------------------------------------
+%% "global" hang workaround.
+%%----------------------------------------------------------------------------
+
+%% This code works around a possible inconsistency in the "global"
+%% state, causing global:sync/0 to never return.
+%%
+%% 1. A process is spawned.
+%% 2. If after 15", global:sync() didn't return, the "global"
+%% state is parsed.
+%% 3. If it detects that a sync is blocked for more than 10",
+%% the process sends fake nodedown/nodeup events to the two
+%% nodes involved (one local, one remote).
+%% 4. Both "global" instances restart their synchronisation.
+%% 5. globao:sync() finally returns.
+%%
+%% FIXME: Remove this workaround, once we got rid of the change to
+%% "dist_auto_connect" and fixed the bugs uncovered.
+
+global_sync() ->
+ Pid = spawn(fun workaround_global_hang/0),
+ ok = global:sync(),
+ Pid ! global_sync_done,
+ ok.
+
+workaround_global_hang() ->
+ receive
+ global_sync_done ->
+ ok
+ after 10000 ->
+ find_blocked_global_peers()
+ end.
+
+find_blocked_global_peers() ->
+ Snapshot1 = snapshot_global_dict(),
+ timer:sleep(10000),
+ Snapshot2 = snapshot_global_dict(),
+ find_blocked_global_peers1(Snapshot2, Snapshot1).
+
+snapshot_global_dict() ->
+ {status, _, _, [Dict | _]} = sys:get_status(global_name_server),
+ [E || {{sync_tag_his, _}, _} = E <- Dict].
+
+find_blocked_global_peers1([{{sync_tag_his, Peer}, _} = Item | Rest],
+ OlderSnapshot) ->
+ case lists:member(Item, OlderSnapshot) of
+ true -> unblock_global_peer(Peer);
+ false -> ok
+ end,
+ find_blocked_global_peers1(Rest, OlderSnapshot);
+find_blocked_global_peers1([], _) ->
+ ok.
+
+unblock_global_peer(PeerNode) ->
+ ThisNode = node(),
+ PeerState = rpc:call(PeerNode, sys, get_status, [global_name_server]),
+ error_logger:info_msg(
+ "Global hang workaround: global state on ~s seems broken~n"
+ " * Peer global state: ~p~n"
+ " * Local global state: ~p~n"
+ "Faking nodedown/nodeup between ~s and ~s~n",
+ [PeerNode, PeerState, sys:get_status(global_name_server),
+ PeerNode, ThisNode]),
+ {global_name_server, ThisNode} ! {nodedown, PeerNode},
+ {global_name_server, PeerNode} ! {nodedown, ThisNode},
+ {global_name_server, ThisNode} ! {nodeup, PeerNode},
+ {global_name_server, PeerNode} ! {nodeup, ThisNode},
+ ok.
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ %% We trap exits so that the supervisor will not just kill us. We
+ %% want to be sure that we are not going to be killed while
+ %% writing out the cluster status files - bad things can then
+ %% happen.
+ process_flag(trap_exit, true),
+ net_kernel:monitor_nodes(true, [nodedown_reason]),
+ {ok, _} = mnesia:subscribe(system),
+ %% If the node has been restarted, Mnesia can trigger a system notification
+ %% before the monitor subscribes to receive them. To avoid autoheal blocking due to
+ %% the inconsistent database event never arriving, we begin monitoring all running
+ %% nodes as early as possible. The rest of the monitoring ops will only be triggered
+ %% when notifications arrive.
+ Nodes = possibly_partitioned_nodes(),
+ startup_log(Nodes),
+ Monitors = lists:foldl(fun(Node, Monitors0) ->
+ pmon:monitor({rabbit, Node}, Monitors0)
+ end, pmon:new(), Nodes),
+ {ok, ensure_keepalive_timer(#state{monitors = Monitors,
+ subscribers = pmon:new(),
+ partitions = [],
+ guid = rabbit_guid:gen(),
+ node_guids = maps:new(),
+ autoheal = rabbit_autoheal:init()})}.
+
+handle_call(partitions, _From, State = #state{partitions = Partitions}) ->
+ {reply, Partitions, State};
+
+handle_call(status, _From, State = #state{partitions = Partitions}) ->
+ {reply, [{partitions, Partitions},
+ {nodes, [node() | nodes()]}], State};
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(notify_node_up, State = #state{guid = GUID}) ->
+ Nodes = rabbit_nodes:all_running() -- [node()],
+ gen_server:abcast(Nodes, ?SERVER,
+ {node_up, node(), rabbit_mnesia:node_type(), GUID}),
+ %% register other active rabbits with this rabbit
+ DiskNodes = rabbit_mnesia:cluster_nodes(disc),
+ [gen_server:cast(?SERVER, {node_up, N, case lists:member(N, DiskNodes) of
+ true -> disc;
+ false -> ram
+ end}) || N <- Nodes],
+ {noreply, State};
+
+%%----------------------------------------------------------------------------
+%% Partial partition detection
+%%
+%% Every node generates a GUID each time it starts, and announces that
+%% GUID in 'node_up', with 'announce_guid' sent by return so the new
+%% node knows the GUIDs of the others. These GUIDs are sent in all the
+%% partial partition related messages to ensure that we ignore partial
+%% partition messages from before we restarted (to avoid getting stuck
+%% in a loop).
+%%
+%% When one node gets nodedown from another, it then sends
+%% 'check_partial_partition' to all the nodes it still thinks are
+%% alive. If any of those (intermediate) nodes still see the "down"
+%% node as up, they inform it that this has happened. The original
+%% node (in 'ignore', 'pause_if_all_down' or 'autoheal' mode) will then
+%% disconnect from the intermediate node to "upgrade" to a full
+%% partition.
+%%
+%% In pause_minority mode it will instead immediately pause until all
+%% nodes come back. This is because the contract for pause_minority is
+%% that nodes should never sit in a partitioned state - if it just
+%% disconnected, it would become a minority, pause, realise it's not
+%% in a minority any more, and come back, still partitioned (albeit no
+%% longer partially).
+%% ----------------------------------------------------------------------------
+
+handle_cast({node_up, Node, NodeType, GUID},
+ State = #state{guid = MyGUID,
+ node_guids = GUIDs}) ->
+ cast(Node, {announce_guid, node(), MyGUID}),
+ GUIDs1 = maps:put(Node, GUID, GUIDs),
+ handle_cast({node_up, Node, NodeType}, State#state{node_guids = GUIDs1});
+
+handle_cast({announce_guid, Node, GUID}, State = #state{node_guids = GUIDs}) ->
+ {noreply, State#state{node_guids = maps:put(Node, GUID, GUIDs)}};
+
+handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID},
+ State = #state{guid = MyGUID,
+ node_guids = GUIDs}) ->
+ case lists:member(Node, rabbit_nodes:all_running()) andalso
+ maps:find(Node, GUIDs) =:= {ok, NodeGUID} of
+ true -> spawn_link( %%[1]
+ fun () ->
+ case rpc:call(Node, rabbit, is_running, []) of
+ {badrpc, _} -> ok;
+ _ ->
+ rabbit_log:warning("Received a 'DOWN' message"
+ " from ~p but still can"
+ " communicate with it ~n",
+ [Node]),
+ cast(Rep, {partial_partition,
+ Node, node(), RepGUID})
+ end
+ end);
+ false -> ok
+ end,
+ {noreply, State};
+%% [1] We checked that we haven't heard the node go down - but we
+%% really should make sure we can actually communicate with
+%% it. Otherwise there's a race where we falsely detect a partial
+%% partition.
+%%
+%% Now of course the rpc:call/4 may take a long time to return if
+%% connectivity with the node is actually interrupted - but that's OK,
+%% we only really want to do something in a timely manner if
+%% connectivity is OK. However, of course as always we must not block
+%% the node monitor, so we do the check in a separate process.
+
+handle_cast({check_partial_partition, _Node, _Reporter,
+ _NodeGUID, _GUID, _ReporterGUID}, State) ->
+ {noreply, State};
+
+handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
+ State = #state{guid = MyGUID}) ->
+ FmtBase = "Partial partition detected:~n"
+ " * We saw DOWN from ~s~n"
+ " * We can still see ~s which can see ~s~n",
+ ArgsBase = [NotReallyDown, Proxy, NotReallyDown],
+ case application:get_env(rabbit, cluster_partition_handling) of
+ {ok, pause_minority} ->
+ rabbit_log:error(
+ FmtBase ++ " * pause_minority mode enabled~n"
+ "We will therefore pause until the *entire* cluster recovers~n",
+ ArgsBase),
+ await_cluster_recovery(fun all_nodes_up/0),
+ {noreply, State};
+ {ok, {pause_if_all_down, PreferredNodes, _}} ->
+ case in_preferred_partition(PreferredNodes) of
+ true -> rabbit_log:error(
+ FmtBase ++ "We will therefore intentionally "
+ "disconnect from ~s~n", ArgsBase ++ [Proxy]),
+ upgrade_to_full_partition(Proxy);
+ false -> rabbit_log:info(
+ FmtBase ++ "We are about to pause, no need "
+ "for further actions~n", ArgsBase)
+ end,
+ {noreply, State};
+ {ok, _} ->
+ rabbit_log:error(
+ FmtBase ++ "We will therefore intentionally disconnect from ~s~n",
+ ArgsBase ++ [Proxy]),
+ upgrade_to_full_partition(Proxy),
+ {noreply, State}
+ end;
+
+handle_cast({partial_partition, _GUID, _Reporter, _Proxy}, State) ->
+ {noreply, State};
+
+%% Sometimes it appears the Erlang VM does not give us nodedown
+%% messages reliably when another node disconnects from us. Therefore
+%% we are told just before the disconnection so we can reciprocate.
+handle_cast({partial_partition_disconnect, Other}, State) ->
+ rabbit_log:error("Partial partition disconnect from ~s~n", [Other]),
+ disconnect(Other),
+ {noreply, State};
+
+%% Note: when updating the status file, we can't simply write the
+%% mnesia information since the message can (and will) overtake the
+%% mnesia propagation.
+handle_cast({node_up, Node, NodeType},
+ State = #state{monitors = Monitors}) ->
+ rabbit_log:info("rabbit on node ~p up~n", [Node]),
+ {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
+ write_cluster_status({add_node(Node, AllNodes),
+ case NodeType of
+ disc -> add_node(Node, DiscNodes);
+ ram -> DiscNodes
+ end,
+ add_node(Node, RunningNodes)}),
+ ok = handle_live_rabbit(Node),
+ Monitors1 = case pmon:is_monitored({rabbit, Node}, Monitors) of
+ true ->
+ Monitors;
+ false ->
+ pmon:monitor({rabbit, Node}, Monitors)
+ end,
+ {noreply, maybe_autoheal(State#state{monitors = Monitors1})};
+
+handle_cast({joined_cluster, Node, NodeType}, State) ->
+ {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
+ write_cluster_status({add_node(Node, AllNodes),
+ case NodeType of
+ disc -> add_node(Node, DiscNodes);
+ ram -> DiscNodes
+ end,
+ RunningNodes}),
+ {noreply, State};
+
+handle_cast({left_cluster, Node}, State) ->
+ {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
+ write_cluster_status({del_node(Node, AllNodes), del_node(Node, DiscNodes),
+ del_node(Node, RunningNodes)}),
+ {noreply, State};
+
+handle_cast({subscribe, Pid}, State = #state{subscribers = Subscribers}) ->
+ {noreply, State#state{subscribers = pmon:monitor(Pid, Subscribers)}};
+
+handle_cast(keepalive, State) ->
+ {noreply, State};
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason},
+ State = #state{monitors = Monitors, subscribers = Subscribers}) ->
+ rabbit_log:info("rabbit on node ~p down~n", [Node]),
+ {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
+ write_cluster_status({AllNodes, DiscNodes, del_node(Node, RunningNodes)}),
+ [P ! {node_down, Node} || P <- pmon:monitored(Subscribers)],
+ {noreply, handle_dead_rabbit(
+ Node,
+ State#state{monitors = pmon:erase({rabbit, Node}, Monitors)})};
+
+handle_info({'DOWN', _MRef, process, Pid, _Reason},
+ State = #state{subscribers = Subscribers}) ->
+ {noreply, State#state{subscribers = pmon:erase(Pid, Subscribers)}};
+
+handle_info({nodedown, Node, Info}, State = #state{guid = MyGUID,
+ node_guids = GUIDs}) ->
+ rabbit_log:info("node ~p down: ~p~n",
+ [Node, proplists:get_value(nodedown_reason, Info)]),
+ Check = fun (N, CheckGUID, DownGUID) ->
+ cast(N, {check_partial_partition,
+ Node, node(), DownGUID, CheckGUID, MyGUID})
+ end,
+ case maps:find(Node, GUIDs) of
+ {ok, DownGUID} -> Alive = rabbit_nodes:all_running()
+ -- [node(), Node],
+ [case maps:find(N, GUIDs) of
+ {ok, CheckGUID} -> Check(N, CheckGUID, DownGUID);
+ error -> ok
+ end || N <- Alive];
+ error -> ok
+ end,
+ {noreply, handle_dead_node(Node, State)};
+
+handle_info({nodeup, Node, _Info}, State) ->
+ rabbit_log:info("node ~p up~n", [Node]),
+ {noreply, State};
+
+handle_info({mnesia_system_event,
+ {inconsistent_database, running_partitioned_network, Node}},
+ State = #state{partitions = Partitions,
+ monitors = Monitors}) ->
+ %% We will not get a node_up from this node - yet we should treat it as
+ %% up (mostly).
+ State1 = case pmon:is_monitored({rabbit, Node}, Monitors) of
+ true -> State;
+ false -> State#state{
+ monitors = pmon:monitor({rabbit, Node}, Monitors)}
+ end,
+ ok = handle_live_rabbit(Node),
+ Partitions1 = lists:usort([Node | Partitions]),
+ {noreply, maybe_autoheal(State1#state{partitions = Partitions1})};
+
+handle_info({autoheal_msg, Msg}, State = #state{autoheal = AState,
+ partitions = Partitions}) ->
+ AState1 = rabbit_autoheal:handle_msg(Msg, AState, Partitions),
+ {noreply, State#state{autoheal = AState1}};
+
+handle_info(ping_down_nodes, State) ->
+ %% We ping nodes when some are down to ensure that we find out
+ %% about healed partitions quickly. We ping all nodes rather than
+ %% just the ones we know are down for simplicity; it's not expensive
+ %% to ping the nodes that are up, after all.
+ State1 = State#state{down_ping_timer = undefined},
+ Self = self(),
+ %% We ping in a separate process since in a partition it might
+ %% take some noticeable length of time and we don't want to block
+ %% the node monitor for that long.
+ spawn_link(fun () ->
+ ping_all(),
+ case all_nodes_up() of
+ true -> ok;
+ false -> Self ! ping_down_nodes_again
+ end
+ end),
+ {noreply, State1};
+
+handle_info(ping_down_nodes_again, State) ->
+ {noreply, ensure_ping_timer(State)};
+
+handle_info(ping_up_nodes, State) ->
+ %% In this case we need to ensure that we ping "quickly" -
+ %% i.e. only nodes that we know to be up.
+ [cast(N, keepalive) || N <- alive_nodes() -- [node()]],
+ {noreply, ensure_keepalive_timer(State#state{keepalive_timer = undefined})};
+
+handle_info({'EXIT', _, _} = Info, State = #state{autoheal = AState0}) ->
+ AState = rabbit_autoheal:process_down(Info, AState0),
+ {noreply, State#state{autoheal = AState}};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, State) ->
+ rabbit_misc:stop_timer(State, #state.down_ping_timer),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Functions that call the module specific hooks when nodes go up/down
+%%----------------------------------------------------------------------------
+
+handle_dead_node(Node, State = #state{autoheal = Autoheal}) ->
+ %% In general in rabbit_node_monitor we care about whether the
+ %% rabbit application is up rather than the node; we do this so
+ %% that we can respond in the same way to "rabbitmqctl stop_app"
+ %% and "rabbitmqctl stop" as much as possible.
+ %%
+ %% However, for pause_minority and pause_if_all_down modes we can't do
+ %% this, since we depend on looking at whether other nodes are up
+ %% to decide whether to come back up ourselves - if we decide that
+ %% based on the rabbit application we would go down and never come
+ %% back.
+ case application:get_env(rabbit, cluster_partition_handling) of
+ {ok, pause_minority} ->
+ case majority([Node]) of
+ true -> ok;
+ false -> await_cluster_recovery(fun majority/0)
+ end,
+ State;
+ {ok, {pause_if_all_down, PreferredNodes, HowToRecover}} ->
+ case in_preferred_partition(PreferredNodes, [Node]) of
+ true -> ok;
+ false -> await_cluster_recovery(
+ fun in_preferred_partition/0)
+ end,
+ case HowToRecover of
+ autoheal -> State#state{autoheal =
+ rabbit_autoheal:node_down(Node, Autoheal)};
+ _ -> State
+ end;
+ {ok, ignore} ->
+ State;
+ {ok, autoheal} ->
+ State#state{autoheal = rabbit_autoheal:node_down(Node, Autoheal)};
+ {ok, Term} ->
+ rabbit_log:warning("cluster_partition_handling ~p unrecognised, "
+ "assuming 'ignore'~n", [Term]),
+ State
+ end.
+
+await_cluster_recovery(Condition) ->
+ rabbit_log:warning("Cluster minority/secondary status detected - "
+ "awaiting recovery~n", []),
+ run_outside_applications(fun () ->
+ rabbit:stop(),
+ wait_for_cluster_recovery(Condition)
+ end, false),
+ ok.
+
+run_outside_applications(Fun, WaitForExistingProcess) ->
+ spawn_link(fun () ->
+ %% Ignore exit messages from the monitor - the link is needed
+ %% to ensure the monitor detects abnormal exits from this process
+ %% and can reset the 'restarting' status on the autoheal, avoiding
+ %% a deadlock. The monitor is restarted when rabbit does, so messages
+ %% in the other direction should be ignored.
+ process_flag(trap_exit, true),
+ %% If our group leader is inside an application we are about
+ %% to stop, application:stop/1 does not return.
+ group_leader(whereis(init), self()),
+ register_outside_app_process(Fun, WaitForExistingProcess)
+ end).
+
+register_outside_app_process(Fun, WaitForExistingProcess) ->
+ %% Ensure only one such process at a time, the exit(badarg) is
+ %% harmless if one is already running.
+ %%
+ %% If WaitForExistingProcess is false, the given fun is simply not
+ %% executed at all and the process exits.
+ %%
+ %% If WaitForExistingProcess is true, we wait for the end of the
+ %% currently running process before executing the given function.
+ try register(rabbit_outside_app_process, self()) of
+ true ->
+ do_run_outside_app_fun(Fun)
+ catch
+ error:badarg when WaitForExistingProcess ->
+ MRef = erlang:monitor(process, rabbit_outside_app_process),
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ %% The existing process exited, let's try to
+ %% register again.
+ register_outside_app_process(Fun, WaitForExistingProcess)
+ end;
+ error:badarg ->
+ ok
+ end.
+
+do_run_outside_app_fun(Fun) ->
+ try
+ Fun()
+ catch _:E:Stacktrace ->
+ rabbit_log:error(
+ "rabbit_outside_app_process:~n~p~n~p~n",
+ [E, Stacktrace])
+ end.
+
+wait_for_cluster_recovery(Condition) ->
+ ping_all(),
+ case Condition() of
+ true -> rabbit:start();
+ false -> timer:sleep(?RABBIT_DOWN_PING_INTERVAL),
+ wait_for_cluster_recovery(Condition)
+ end.
+
+handle_dead_rabbit(Node, State = #state{partitions = Partitions,
+ autoheal = Autoheal}) ->
+ %% TODO: This may turn out to be a performance hog when there are
+ %% lots of nodes. We really only need to execute some of these
+ %% statements on *one* node, rather than all of them.
+ ok = rabbit_networking:on_node_down(Node),
+ ok = rabbit_amqqueue:on_node_down(Node),
+ ok = rabbit_alarm:on_node_down(Node),
+ ok = rabbit_mnesia:on_node_down(Node),
+ %% If we have been partitioned, and we are now in the only remaining
+ %% partition, we no longer care about partitions - forget them. Note
+ %% that we do not attempt to deal with individual (other) partitions
+ %% going away. It's only safe to forget anything about partitions when
+ %% there are no partitions.
+ Down = Partitions -- alive_rabbit_nodes(),
+ NoLongerPartitioned = rabbit_nodes:all_running(),
+ Partitions1 = case Partitions -- Down -- NoLongerPartitioned of
+ [] -> [];
+ _ -> Partitions
+ end,
+ ensure_ping_timer(
+ State#state{partitions = Partitions1,
+ autoheal = rabbit_autoheal:rabbit_down(Node, Autoheal)}).
+
+ensure_ping_timer(State) ->
+ rabbit_misc:ensure_timer(
+ State, #state.down_ping_timer, ?RABBIT_DOWN_PING_INTERVAL,
+ ping_down_nodes).
+
+ensure_keepalive_timer(State) ->
+ {ok, Interval} = application:get_env(rabbit, cluster_keepalive_interval),
+ rabbit_misc:ensure_timer(
+ State, #state.keepalive_timer, Interval, ping_up_nodes).
+
+handle_live_rabbit(Node) ->
+ ok = rabbit_amqqueue:on_node_up(Node),
+ ok = rabbit_alarm:on_node_up(Node),
+ ok = rabbit_mnesia:on_node_up(Node).
+
+maybe_autoheal(State = #state{partitions = []}) ->
+ State;
+
+maybe_autoheal(State = #state{autoheal = AState}) ->
+ case all_nodes_up() of
+ true -> State#state{autoheal = rabbit_autoheal:maybe_start(AState)};
+ false -> State
+ end.
+
+%%--------------------------------------------------------------------
+%% Internal utils
+%%--------------------------------------------------------------------
+
+try_read_file(FileName) ->
+ case rabbit_file:read_term_file(FileName) of
+ {ok, Term} -> {ok, Term};
+ {error, enoent} -> {error, enoent};
+ {error, E} -> throw({error, {cannot_read_file, FileName, E}})
+ end.
+
+legacy_cluster_nodes(Nodes) ->
+ %% We get all the info that we can, including the nodes from
+ %% mnesia, which will be there if the node is a disc node (empty
+ %% list otherwise)
+ lists:usort(Nodes ++ mnesia:system_info(db_nodes)).
+
+legacy_disc_nodes(AllNodes) ->
+ case AllNodes == [] orelse lists:member(node(), AllNodes) of
+ true -> [node()];
+ false -> []
+ end.
+
+add_node(Node, Nodes) -> lists:usort([Node | Nodes]).
+
+del_node(Node, Nodes) -> Nodes -- [Node].
+
+cast(Node, Msg) -> gen_server:cast({?SERVER, Node}, Msg).
+
+upgrade_to_full_partition(Proxy) ->
+ cast(Proxy, {partial_partition_disconnect, node()}),
+ disconnect(Proxy).
+
+%% When we call this, it's because we want to force Mnesia to detect a
+%% partition. But if we just disconnect_node/1 then Mnesia won't
+%% detect a very short partition. So we want to force a slightly
+%% longer disconnect. Unfortunately we don't have a way to blacklist
+%% individual nodes; the best we can do is turn off auto-connect
+%% altogether.
+disconnect(Node) ->
+ application:set_env(kernel, dist_auto_connect, never),
+ erlang:disconnect_node(Node),
+ timer:sleep(1000),
+ application:unset_env(kernel, dist_auto_connect),
+ ok.
+
+%%--------------------------------------------------------------------
+
+%% mnesia:system_info(db_nodes) (and hence
+%% rabbit_nodes:all_running()) does not return all nodes
+%% when partitioned, just those that we are sharing Mnesia state
+%% with. So we have a small set of replacement functions
+%% here. "rabbit" in a function's name implies we test if the rabbit
+%% application is up, not just the node.
+
+%% As we use these functions to decide what to do in pause_minority or
+%% pause_if_all_down states, they *must* be fast, even in the case where
+%% TCP connections are timing out. So that means we should be careful
+%% about whether we connect to nodes which are currently disconnected.
+
+majority() ->
+ majority([]).
+
+majority(NodesDown) ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ AliveNodes = alive_nodes(Nodes) -- NodesDown,
+ length(AliveNodes) / length(Nodes) > 0.5.
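+
+%% For example (hypothetical cluster sizes): with 2 of 3 nodes alive the
+%% ratio is ~0.67 > 0.5, so we are in the majority; with 1 of 2 nodes alive
+%% the ratio is exactly 0.5, which does not count (an even split is not a
+%% majority).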
+
+in_preferred_partition() ->
+ {ok, {pause_if_all_down, PreferredNodes, _}} =
+ application:get_env(rabbit, cluster_partition_handling),
+ in_preferred_partition(PreferredNodes).
+
+in_preferred_partition(PreferredNodes) ->
+ in_preferred_partition(PreferredNodes, []).
+
+in_preferred_partition(PreferredNodes, NodesDown) ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ RealPreferredNodes = [N || N <- PreferredNodes, lists:member(N, Nodes)],
+ AliveNodes = alive_nodes(RealPreferredNodes) -- NodesDown,
+ RealPreferredNodes =:= [] orelse AliveNodes =/= [].
+
+all_nodes_up() ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ length(alive_nodes(Nodes)) =:= length(Nodes).
+
+-spec all_rabbit_nodes_up() -> boolean().
+
+all_rabbit_nodes_up() ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ length(alive_rabbit_nodes(Nodes)) =:= length(Nodes).
+
+-spec alive_nodes([node()]) -> [node()].
+
+alive_nodes() -> alive_nodes(rabbit_mnesia:cluster_nodes(all)).
+alive_nodes(Nodes) -> [N || N <- Nodes, lists:member(N, [node()|nodes()])].
+
+-spec alive_rabbit_nodes([node()]) -> [node()].
+
+alive_rabbit_nodes() -> alive_rabbit_nodes(rabbit_mnesia:cluster_nodes(all)).
+
+alive_rabbit_nodes(Nodes) ->
+ [N || N <- alive_nodes(Nodes), rabbit:is_running(N)].
+
+%% This one is allowed to connect!
+
+-spec ping_all() -> 'ok'.
+
+ping_all() ->
+ [net_adm:ping(N) || N <- rabbit_mnesia:cluster_nodes(all)],
+ ok.
+
+possibly_partitioned_nodes() ->
+ alive_rabbit_nodes() -- rabbit_nodes:all_running().
+
+startup_log([]) ->
+ rabbit_log:info("Starting rabbit_node_monitor~n", []);
+startup_log(Nodes) ->
+ rabbit_log:info("Starting rabbit_node_monitor, might be partitioned from ~p~n",
+ [Nodes]).
diff --git a/deps/rabbit/src/rabbit_nodes.erl b/deps/rabbit/src/rabbit_nodes.erl
new file mode 100644
index 0000000000..3034a4d513
--- /dev/null
+++ b/deps/rabbit/src/rabbit_nodes.erl
@@ -0,0 +1,157 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_nodes).
+
+-export([names/1, diagnostics/1, make/1, make/2, parts/1, cookie_hash/0,
+ is_running/2, is_process_running/2,
+ cluster_name/0, set_cluster_name/1, set_cluster_name/2, ensure_epmd/0,
+ all_running/0, name_type/0, running_count/0, total_count/0,
+ await_running_count/2, is_single_node_cluster/0,
+ boot/0]).
+-export([persistent_cluster_id/0, seed_internal_cluster_id/0, seed_user_provided_cluster_name/0]).
+
+-include_lib("kernel/include/inet.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(SAMPLING_INTERVAL, 1000).
+
+-define(INTERNAL_CLUSTER_ID_PARAM_NAME, internal_cluster_id).
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+boot() ->
+ seed_internal_cluster_id(),
+ seed_user_provided_cluster_name().
+
+name_type() ->
+ #{nodename_type := NodeType} = rabbit_prelaunch:get_context(),
+ NodeType.
+
+-spec names(string()) ->
+ rabbit_types:ok_or_error2([{string(), integer()}], term()).
+
+names(Hostname) ->
+ rabbit_nodes_common:names(Hostname).
+
+-spec diagnostics([node()]) -> string().
+
+diagnostics(Nodes) ->
+ rabbit_nodes_common:diagnostics(Nodes).
+
+make(NameOrParts) ->
+ rabbit_nodes_common:make(NameOrParts).
+
+make(ShortName, Hostname) ->
+ make({ShortName, Hostname}).
+
+parts(NodeStr) ->
+ rabbit_nodes_common:parts(NodeStr).
+
+-spec cookie_hash() -> string().
+
+cookie_hash() ->
+ rabbit_nodes_common:cookie_hash().
+
+-spec is_running(node(), atom()) -> boolean().
+
+is_running(Node, Application) ->
+ rabbit_nodes_common:is_running(Node, Application).
+
+-spec is_process_running(node(), atom()) -> boolean().
+
+is_process_running(Node, Process) ->
+ rabbit_nodes_common:is_process_running(Node, Process).
+
+-spec cluster_name() -> binary().
+
+cluster_name() ->
+ rabbit_runtime_parameters:value_global(
+ cluster_name, cluster_name_default()).
+
+cluster_name_default() ->
+ {ID, _} = parts(node()),
+ FQDN = rabbit_net:hostname(),
+ list_to_binary(atom_to_list(make({ID, FQDN}))).
+
+-spec persistent_cluster_id() -> binary().
+persistent_cluster_id() ->
+ case rabbit_runtime_parameters:lookup_global(?INTERNAL_CLUSTER_ID_PARAM_NAME) of
+ not_found ->
+ seed_internal_cluster_id(),
+ persistent_cluster_id();
+ Param ->
+ #{value := Val, name := ?INTERNAL_CLUSTER_ID_PARAM_NAME} = maps:from_list(Param),
+ Val
+ end.
+
+-spec seed_internal_cluster_id() -> binary().
+seed_internal_cluster_id() ->
+ case rabbit_runtime_parameters:lookup_global(?INTERNAL_CLUSTER_ID_PARAM_NAME) of
+ not_found ->
+ Id = rabbit_guid:binary(rabbit_guid:gen(), "rabbitmq-cluster-id"),
+ rabbit_log:info("Initialising internal cluster ID to '~s'", [Id]),
+ rabbit_runtime_parameters:set_global(?INTERNAL_CLUSTER_ID_PARAM_NAME, Id, ?INTERNAL_USER),
+ Id;
+ Param ->
+ #{value := Val, name := ?INTERNAL_CLUSTER_ID_PARAM_NAME} = maps:from_list(Param),
+ Val
+ end.
+
+seed_user_provided_cluster_name() ->
+ case application:get_env(rabbit, cluster_name) of
+ undefined -> ok;
+ {ok, Name} ->
+ rabbit_log:info("Setting cluster name to '~s' as configured", [Name]),
+ set_cluster_name(rabbit_data_coercion:to_binary(Name))
+ end.
+
+-spec set_cluster_name(binary()) -> 'ok'.
+
+set_cluster_name(Name) ->
+ set_cluster_name(Name, ?INTERNAL_USER).
+
+-spec set_cluster_name(binary(), rabbit_types:username()) -> 'ok'.
+
+set_cluster_name(Name, Username) ->
+ %% Cluster name should be binary
+ BinaryName = rabbit_data_coercion:to_binary(Name),
+ rabbit_runtime_parameters:set_global(cluster_name, BinaryName, Username).
+
+ensure_epmd() ->
+ rabbit_nodes_common:ensure_epmd().
+
+-spec all_running() -> [node()].
+all_running() -> rabbit_mnesia:cluster_nodes(running).
+
+-spec running_count() -> integer().
+running_count() -> length(all_running()).
+
+-spec total_count() -> integer().
+total_count() -> length(rabbit_mnesia:cluster_nodes(all)).
+
+-spec is_single_node_cluster() -> boolean().
+is_single_node_cluster() ->
+ total_count() =:= 1.
+
+-spec await_running_count(integer(), integer()) -> 'ok' | {'error', atom()}.
+await_running_count(TargetCount, Timeout) ->
+ Retries = round(Timeout/?SAMPLING_INTERVAL),
+ await_running_count_with_retries(TargetCount, Retries).
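+
+%% For example (hypothetical arguments): await_running_count(3, 30000)
+%% polls running_count/0 up to round(30000 / ?SAMPLING_INTERVAL) = 30 times,
+%% sleeping ?SAMPLING_INTERVAL (1000 ms) between attempts.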
+
+await_running_count_with_retries(1, _Retries) -> ok;
+await_running_count_with_retries(_TargetCount, Retries) when Retries =:= 0 ->
+ {error, timeout};
+await_running_count_with_retries(TargetCount, Retries) ->
+ case running_count() >= TargetCount of
+ true -> ok;
+ false ->
+ timer:sleep(?SAMPLING_INTERVAL),
+ await_running_count_with_retries(TargetCount, Retries - 1)
+ end.
diff --git a/deps/rabbit/src/rabbit_osiris_metrics.erl b/deps/rabbit/src/rabbit_osiris_metrics.erl
new file mode 100644
index 0000000000..7b2574c7e1
--- /dev/null
+++ b/deps/rabbit/src/rabbit_osiris_metrics.erl
@@ -0,0 +1,103 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_osiris_metrics).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(TICK_TIMEOUT, 5000).
+-define(SERVER, ?MODULE).
+
+-define(STATISTICS_KEYS,
+ [policy,
+ operator_policy,
+ effective_policy_definition,
+ state,
+ leader,
+ online,
+ members
+ ]).
+
+-record(state, {timeout :: non_neg_integer()}).
+
+%%----------------------------------------------------------------------------
+%% Starts the raw metrics storage and owns the ETS tables.
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+init([]) ->
+ Timeout = application:get_env(rabbit, stream_tick_interval,
+ ?TICK_TIMEOUT),
+ erlang:send_after(Timeout, self(), tick),
+ {ok, #state{timeout = Timeout}}.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(tick, #state{timeout = Timeout} = State) ->
+ Data = osiris_counters:overview(),
+ maps:map(
+ fun ({osiris_writer, QName}, #{offset := Offs,
+ first_offset := FstOffs}) ->
+ COffs = Offs + 1 - FstOffs,
+ rabbit_core_metrics:queue_stats(QName, COffs, 0, COffs, 0),
+ Infos = try
+ %% TODO complete stats!
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ rabbit_stream_queue:info(Q, ?STATISTICS_KEYS);
+ _ ->
+ []
+ end
+ catch
+ _:_ ->
+ %% It's possible that the writer has died but
+ %% it's still on the amqqueue record, so the
+ %% `erlang:process_info/2` calls will return
+ %% `undefined` and crash with a badmatch.
+ %% At least for now, skipping the metrics might
+ %% be the best option. Otherwise this brings
+ %% down `rabbit_sup` and the whole `rabbit` app.
+ []
+ end,
+ rabbit_core_metrics:queue_stats(QName, Infos),
+ rabbit_event:notify(queue_stats, Infos ++ [{name, QName},
+ {messages, COffs},
+ {messages_ready, COffs},
+ {messages_unacknowledged, 0}]),
+ ok;
+ (_, _V) ->
+ ok
+ end, Data),
+ erlang:send_after(Timeout, self(), tick),
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/rabbit_parameter_validation.erl b/deps/rabbit/src/rabbit_parameter_validation.erl
new file mode 100644
index 0000000000..66287ec799
--- /dev/null
+++ b/deps/rabbit/src/rabbit_parameter_validation.erl
@@ -0,0 +1,88 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_parameter_validation).
+
+-export([number/2, integer/2, binary/2, boolean/2, list/2, regex/2, proplist/3, enum/1]).
+
+number(_Name, Term) when is_number(Term) ->
+ ok;
+
+number(Name, Term) ->
+ {error, "~s should be a number, actually was ~p", [Name, Term]}.
+
+integer(_Name, Term) when is_integer(Term) ->
+ ok;
+
+integer(Name, Term) ->
+ {error, "~s should be a number, actually was ~p", [Name, Term]}.
+
+binary(_Name, Term) when is_binary(Term) ->
+ ok;
+
+binary(Name, Term) ->
+ {error, "~s should be binary, actually was ~p", [Name, Term]}.
+
+boolean(_Name, Term) when is_boolean(Term) ->
+ ok;
+boolean(Name, Term) ->
+ {error, "~s should be boolean, actually was ~p", [Name, Term]}.
+
+list(_Name, Term) when is_list(Term) ->
+ ok;
+
+list(Name, Term) ->
+ {error, "~s should be list, actually was ~p", [Name, Term]}.
+
+regex(Name, Term) when is_binary(Term) ->
+ case re:compile(Term) of
+ {ok, _} -> ok;
+ {error, Reason} -> {error, "~s should be regular expression "
+ "but is invalid: ~p", [Name, Reason]}
+ end;
+regex(Name, Term) ->
+ {error, "~s should be a binary but was ~p", [Name, Term]}.
+
+proplist(Name, Constraints, Term) when is_list(Term) ->
+ {Results, Remainder}
+ = lists:foldl(
+ fun ({Key, Fun, Needed}, {Results0, Term0}) ->
+ case {lists:keytake(Key, 1, Term0), Needed} of
+ {{value, {Key, Value}, Term1}, _} ->
+ {[Fun(Key, Value) | Results0],
+ Term1};
+ {false, mandatory} ->
+ {[{error, "Key \"~s\" not found in ~s",
+ [Key, Name]} | Results0], Term0};
+ {false, optional} ->
+ {Results0, Term0}
+ end
+ end, {[], Term}, Constraints),
+ case Remainder of
+ [] -> Results;
+ _ -> [{error, "Unrecognised terms ~p in ~s", [Remainder, Name]}
+ | Results]
+ end;
+
+proplist(Name, Constraints, Term0) when is_map(Term0) ->
+ Term = maps:to_list(Term0),
+ proplist(Name, Constraints, Term);
+
+proplist(Name, _Constraints, Term) ->
+ {error, "~s not a list ~p", [Name, Term]}.
+
+enum(OptionsA) ->
+ Options = [list_to_binary(atom_to_list(O)) || O <- OptionsA],
+ fun (Name, Term) when is_binary(Term) ->
+ case lists:member(Term, Options) of
+ true -> ok;
+ false -> {error, "~s should be one of ~p, actually was ~p",
+ [Name, Options, Term]}
+ end;
+ (Name, Term) ->
+ {error, "~s should be binary, actually was ~p", [Name, Term]}
+ end.
diff --git a/deps/rabbit/src/rabbit_password.erl b/deps/rabbit/src/rabbit_password.erl
new file mode 100644
index 0000000000..6a5254b707
--- /dev/null
+++ b/deps/rabbit/src/rabbit_password.erl
@@ -0,0 +1,52 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_password).
+-include("rabbit.hrl").
+
+-define(DEFAULT_HASHING_MODULE, rabbit_password_hashing_sha256).
+
+%%
+%% API
+%%
+
+-export([hash/1, hash/2, generate_salt/0, salted_hash/2, salted_hash/3,
+ hashing_mod/0, hashing_mod/1]).
+
+hash(Cleartext) ->
+ hash(hashing_mod(), Cleartext).
+
+hash(HashingMod, Cleartext) ->
+ SaltBin = generate_salt(),
+ Hash = salted_hash(HashingMod, SaltBin, Cleartext),
+ <<SaltBin/binary, Hash/binary>>.
+
+generate_salt() ->
+ Salt = rand:uniform(16#ffffffff),
+ <<Salt:32>>.
+
+salted_hash(Salt, Cleartext) ->
+ salted_hash(hashing_mod(), Salt, Cleartext).
+
+salted_hash(Mod, Salt, Cleartext) ->
+ Fun = fun Mod:hash/1,
+ Fun(<<Salt/binary, Cleartext/binary>>).
+
+hashing_mod() ->
+ rabbit_misc:get_env(rabbit, password_hashing_module,
+ ?DEFAULT_HASHING_MODULE).
+
+hashing_mod(rabbit_password_hashing_sha256) ->
+ rabbit_password_hashing_sha256;
+hashing_mod(rabbit_password_hashing_md5) ->
+ rabbit_password_hashing_md5;
+%% fall back to the hashing function that was used prior to 3.6.0
+hashing_mod(undefined) ->
+ rabbit_password_hashing_md5;
+%% if a custom module is configured, simply use it
+hashing_mod(CustomMod) when is_atom(CustomMod) ->
+ CustomMod.
diff --git a/deps/rabbit/src/rabbit_password_hashing_md5.erl b/deps/rabbit/src/rabbit_password_hashing_md5.erl
new file mode 100644
index 0000000000..1e306673ca
--- /dev/null
+++ b/deps/rabbit/src/rabbit_password_hashing_md5.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Legacy hashing implementation, only used as a last resort when
+%% #internal_user.hashing_algorithm is md5 or undefined (the case in
+%% pre-3.6.0 user records).
+
+-module(rabbit_password_hashing_md5).
+
+-behaviour(rabbit_password_hashing).
+
+-export([hash/1]).
+
+hash(Binary) ->
+ erlang:md5(Binary).
diff --git a/deps/rabbit/src/rabbit_password_hashing_sha256.erl b/deps/rabbit/src/rabbit_password_hashing_sha256.erl
new file mode 100644
index 0000000000..3ccc298efd
--- /dev/null
+++ b/deps/rabbit/src/rabbit_password_hashing_sha256.erl
@@ -0,0 +1,15 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_password_hashing_sha256).
+
+-behaviour(rabbit_password_hashing).
+
+-export([hash/1]).
+
+hash(Binary) ->
+ crypto:hash(sha256, Binary).
diff --git a/deps/rabbit/src/rabbit_password_hashing_sha512.erl b/deps/rabbit/src/rabbit_password_hashing_sha512.erl
new file mode 100644
index 0000000000..c5edf8888a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_password_hashing_sha512.erl
@@ -0,0 +1,15 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_password_hashing_sha512).
+
+-behaviour(rabbit_password_hashing).
+
+-export([hash/1]).
+
+hash(Binary) ->
+ crypto:hash(sha512, Binary).
diff --git a/deps/rabbit/src/rabbit_peer_discovery.erl b/deps/rabbit/src/rabbit_peer_discovery.erl
new file mode 100644
index 0000000000..1688579450
--- /dev/null
+++ b/deps/rabbit/src/rabbit_peer_discovery.erl
@@ -0,0 +1,326 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery).
+
+%%
+%% API
+%%
+
+-export([maybe_init/0, discover_cluster_nodes/0, backend/0, node_type/0,
+ normalize/1, format_discovered_nodes/1, log_configured_backend/0,
+ register/0, unregister/0, maybe_register/0, maybe_unregister/0,
+ maybe_inject_randomized_delay/0, lock/0, unlock/1,
+ discovery_retries/0]).
+-export([append_node_prefix/1, node_prefix/0, locking_retry_timeout/0,
+ lock_acquisition_failure_mode/0]).
+
+-define(DEFAULT_BACKEND, rabbit_peer_discovery_classic_config).
+
+%% what node type is used by default for this node when joining
+%% a new cluster as a virgin node
+-define(DEFAULT_NODE_TYPE, disc).
+
+%% default node prefix to attach to discovered hostnames
+-define(DEFAULT_PREFIX, "rabbit").
+
+%% default randomized delay range, in seconds
+-define(DEFAULT_STARTUP_RANDOMIZED_DELAY, {5, 60}).
+
+%% default discovery retries and interval.
+-define(DEFAULT_DISCOVERY_RETRY_COUNT, 10).
+-define(DEFAULT_DISCOVERY_RETRY_INTERVAL_MS, 500).
+
+-define(NODENAME_PART_SEPARATOR, "@").
+
+-spec backend() -> atom().
+
+backend() ->
+ case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ proplists:get_value(peer_discovery_backend, Proplist, ?DEFAULT_BACKEND);
+ undefined ->
+ ?DEFAULT_BACKEND
+ end.
+
+
+
+-spec node_type() -> rabbit_types:node_type().
+
+node_type() ->
+ case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ proplists:get_value(node_type, Proplist, ?DEFAULT_NODE_TYPE);
+ undefined ->
+ ?DEFAULT_NODE_TYPE
+ end.
+
+-spec locking_retry_timeout() -> {Retries :: integer(), Timeout :: integer()}.
+
+locking_retry_timeout() ->
+ case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ Retries = proplists:get_value(lock_retry_limit, Proplist, 10),
+ Timeout = proplists:get_value(lock_retry_timeout, Proplist, 30000),
+ {Retries, Timeout};
+ undefined ->
+ {10, 30000}
+ end.
+
+-spec lock_acquisition_failure_mode() -> ignore | fail.
+
+lock_acquisition_failure_mode() ->
+ case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ proplists:get_value(lock_acquisition_failure_mode, Proplist, fail);
+ undefined ->
+ fail
+ end.
+
+-spec log_configured_backend() -> ok.
+
+log_configured_backend() ->
+ rabbit_log:info("Configured peer discovery backend: ~s~n", [backend()]).
+
+maybe_init() ->
+ Backend = backend(),
+ code:ensure_loaded(Backend),
+ case erlang:function_exported(Backend, init, 0) of
+ true ->
+ rabbit_log:debug("Peer discovery backend supports initialisation"),
+ case Backend:init() of
+ ok ->
+ rabbit_log:debug("Peer discovery backend initialisation succeeded"),
+ ok;
+ {error, Error} ->
+ rabbit_log:warning("Peer discovery backend initialisation failed: ~p.", [Error]),
+ ok
+ end;
+ false ->
+ rabbit_log:debug("Peer discovery backend does not support initialisation"),
+ ok
+ end.
+
+
+%% This module doesn't currently sanity-check the return value of
+%% `Backend:list_nodes()`. Therefore, it could return something invalid:
+%% thus the `{ok, any()}` in the spec.
+%%
+%% `rabbit_mnesia:init_from_config()` does some verifications.
+
+-spec discover_cluster_nodes() ->
+ {ok, {Nodes :: [node()], NodeType :: rabbit_types:node_type()} | any()} |
+ {error, Reason :: string()}.
+
+discover_cluster_nodes() ->
+ Backend = backend(),
+ normalize(Backend:list_nodes()).
+
+
+-spec maybe_register() -> ok.
+
+maybe_register() ->
+ Backend = backend(),
+ case Backend:supports_registration() of
+ true ->
+ register(),
+ Backend:post_registration();
+ false ->
+ rabbit_log:info("Peer discovery backend ~s does not support registration, skipping registration.", [Backend]),
+ ok
+ end.
+
+
+-spec maybe_unregister() -> ok.
+
+maybe_unregister() ->
+ Backend = backend(),
+ case Backend:supports_registration() of
+ true ->
+ unregister();
+ false ->
+ rabbit_log:info("Peer discovery backend ~s does not support registration, skipping unregistration.", [Backend]),
+ ok
+ end.
+
+-spec discovery_retries() -> {Retries :: integer(), Interval :: integer()}.
+
+discovery_retries() ->
+ case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ Retries = proplists:get_value(discovery_retry_limit, Proplist, ?DEFAULT_DISCOVERY_RETRY_COUNT),
+ Interval = proplists:get_value(discovery_retry_interval, Proplist, ?DEFAULT_DISCOVERY_RETRY_INTERVAL_MS),
+ {Retries, Interval};
+ undefined ->
+ {?DEFAULT_DISCOVERY_RETRY_COUNT, ?DEFAULT_DISCOVERY_RETRY_INTERVAL_MS}
+ end.
+
+
+-spec maybe_inject_randomized_delay() -> ok.
+maybe_inject_randomized_delay() ->
+ Backend = backend(),
+ case Backend:supports_registration() of
+ true ->
+ rabbit_log:info("Peer discovery backend ~s supports registration.", [Backend]),
+ inject_randomized_delay();
+ false ->
+ rabbit_log:info("Peer discovery backend ~s does not support registration, skipping randomized startup delay.", [Backend]),
+ ok
+ end.
+
+-spec inject_randomized_delay() -> ok.
+
+inject_randomized_delay() ->
+ {Min, Max} = randomized_delay_range_in_ms(),
+ case {Min, Max} of
+ %% When the max value is set to 0, consider the delay to be disabled.
+ %% In addition, `rand:uniform/1` will fail with a "no function clause"
+ %% when the argument is 0.
+ {_, 0} ->
+ rabbit_log:info("Randomized delay range's upper bound is set to 0. Considering it disabled."),
+ ok;
+ {_, N} when is_number(N) ->
+ rand:seed(exsplus),
+ RandomVal = rand:uniform(round(N)),
+ rabbit_log:debug("Randomized startup delay: configured range is from ~p to ~p milliseconds, PRNG pick: ~p...",
+ [Min, Max, RandomVal]),
+ Effective = case RandomVal < Min of
+ true -> Min;
+ false -> RandomVal
+ end,
+ rabbit_log:info("Will wait for ~p milliseconds before proceeding with registration...", [Effective]),
+ timer:sleep(Effective),
+ ok
+ end.
+
+-spec randomized_delay_range_in_ms() -> {integer(), integer()}.
+
+randomized_delay_range_in_ms() ->
+ Backend = backend(),
+ Default = case erlang:function_exported(Backend, randomized_startup_delay_range, 0) of
+ true -> Backend:randomized_startup_delay_range();
+ false -> ?DEFAULT_STARTUP_RANDOMIZED_DELAY
+ end,
+ {Min, Max} = case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ proplists:get_value(randomized_startup_delay_range, Proplist, Default);
+ undefined ->
+ Default
+ end,
+ {Min * 1000, Max * 1000}.
+
+
+-spec register() -> ok.
+
+register() ->
+ Backend = backend(),
+ rabbit_log:info("Will register with peer discovery backend ~s", [Backend]),
+ case Backend:register() of
+ ok -> ok;
+ {error, Error} ->
+ rabbit_log:error("Failed to register with peer discovery backend ~s: ~p",
+ [Backend, Error]),
+ ok
+ end.
+
+
+-spec unregister() -> ok.
+
+unregister() ->
+ Backend = backend(),
+ rabbit_log:info("Will unregister with peer discovery backend ~s", [Backend]),
+ case Backend:unregister() of
+ ok -> ok;
+ {error, Error} ->
+ rabbit_log:error("Failed to unregister with peer discovery backend ~s: ~p",
+ [Backend, Error]),
+ ok
+ end.
+
+-spec lock() -> {ok, Data :: term()} | not_supported | {error, Reason :: string()}.
+
+lock() ->
+ Backend = backend(),
+ rabbit_log:info("Will try to lock with peer discovery backend ~s", [Backend]),
+ case Backend:lock(node()) of
+ {error, Reason} = Error ->
+ rabbit_log:error("Failed to lock with peer discovery backend ~s: ~p",
+ [Backend, Reason]),
+ Error;
+ Any ->
+ Any
+ end.
+
+-spec unlock(Data :: term()) -> ok | {error, Reason :: string()}.
+
+unlock(Data) ->
+ Backend = backend(),
+ rabbit_log:info("Will try to unlock with peer discovery backend ~s", [Backend]),
+ case Backend:unlock(Data) of
+ {error, Reason} = Error ->
+ rabbit_log:error("Failed to unlock with peer discovery backend ~s: ~p, "
+ "lock data: ~p",
+ [Backend, Reason, Data]),
+ Error;
+ Any ->
+ Any
+ end.
+
+%%
+%% Implementation
+%%
+
+-spec normalize(Nodes :: [node()] |
+ {Nodes :: [node()],
+ NodeType :: rabbit_types:node_type()} |
+ {ok, Nodes :: [node()]} |
+ {ok, {Nodes :: [node()],
+ NodeType :: rabbit_types:node_type()}} |
+ {error, Reason :: string()}) ->
+ {ok, {Nodes :: [node()], NodeType :: rabbit_types:node_type()}} |
+ {error, Reason :: string()}.
+
+normalize(Nodes) when is_list(Nodes) ->
+ {ok, {Nodes, disc}};
+normalize({Nodes, NodeType}) when is_list(Nodes) andalso is_atom(NodeType) ->
+ {ok, {Nodes, NodeType}};
+normalize({ok, Nodes}) when is_list(Nodes) ->
+ {ok, {Nodes, disc}};
+normalize({ok, {Nodes, NodeType}}) when is_list(Nodes) andalso is_atom(NodeType) ->
+ {ok, {Nodes, NodeType}};
+normalize({error, Reason}) ->
+ {error, Reason}.
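+%% For illustration, each accepted input shape normalizes as follows
+%% (node names are hypothetical):
+%%   [rabbit@n1]                  -> {ok, {[rabbit@n1], disc}}
+%%   {[rabbit@n1], ram}           -> {ok, {[rabbit@n1], ram}}
+%%   {ok, [rabbit@n1]}            -> {ok, {[rabbit@n1], disc}}
+%%   {ok, {[rabbit@n1], ram}}     -> {ok, {[rabbit@n1], ram}}
+%%   {error, "not available"}     -> {error, "not available"}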
+
+-spec format_discovered_nodes(Nodes :: list()) -> string().
+
+format_discovered_nodes(Nodes) ->
+ %% NOTE: in OTP 21 string:join/2 is deprecated but still available.
+ %% Its recommended replacement is not a drop-in one, though, so
+ %% we will not be switching just yet.
+ string:join(lists:map(fun rabbit_data_coercion:to_list/1, Nodes), ", ").
+
+
+
+-spec node_prefix() -> string().
+
+node_prefix() ->
+ case string:tokens(atom_to_list(node()), ?NODENAME_PART_SEPARATOR) of
+ [Prefix, _] -> Prefix;
+ [_] -> ?DEFAULT_PREFIX
+ end.
+
+
+
+-spec append_node_prefix(Value :: binary() | string()) -> string().
+
+append_node_prefix(Value) when is_binary(Value) orelse is_list(Value) ->
+ Val = rabbit_data_coercion:to_list(Value),
+ Hostname = case string:tokens(Val, ?NODENAME_PART_SEPARATOR) of
+ [_ExistingPrefix, HN] -> HN;
+ [HN] -> HN
+ end,
+ string:join([node_prefix(), Hostname], ?NODENAME_PART_SEPARATOR).
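+%% For illustration, assuming "@" is ?NODENAME_PART_SEPARATOR and node() is
+%% 'rabbit@seed-host', node_prefix() returns "rabbit" and
+%% append_node_prefix("10.0.0.2") returns "rabbit@10.0.0.2".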
diff --git a/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl b/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl
new file mode 100644
index 0000000000..8bc7382a75
--- /dev/null
+++ b/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_classic_config).
+-behaviour(rabbit_peer_discovery_backend).
+
+-include("rabbit.hrl").
+
+-export([list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1]).
+
+%%
+%% API
+%%
+
+-spec list_nodes() -> {ok, {Nodes :: [node()], rabbit_types:node_type()}} |
+ {error, Reason :: string()}.
+
+list_nodes() ->
+ case application:get_env(rabbit, cluster_nodes, {[], disc}) of
+ {_Nodes, _NodeType} = Pair -> {ok, Pair};
+ Nodes when is_list(Nodes) -> {ok, {Nodes, disc}}
+ end.
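+%% For illustration, a (hypothetical) classic configuration such as
+%%   {rabbit, [{cluster_nodes, {['rabbit@n1', 'rabbit@n2'], disc}}]}
+%% yields {ok, {['rabbit@n1', 'rabbit@n2'], disc}}; a bare node list is
+%% treated as a list of disc nodes.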
+
+-spec supports_registration() -> boolean().
+
+supports_registration() ->
+ %% If we don't have any nodes configured, skip randomized delay and similar operations
+ %% as we don't want to delay startup for no reason. MK.
+ has_any_peer_nodes_configured().
+
+-spec register() -> ok.
+
+register() ->
+ ok.
+
+-spec unregister() -> ok.
+
+unregister() ->
+ ok.
+
+-spec post_registration() -> ok.
+
+post_registration() ->
+ ok.
+
+-spec lock(Node :: atom()) -> not_supported.
+
+lock(_Node) ->
+ not_supported.
+
+-spec unlock(Data :: term()) -> ok.
+
+unlock(_Data) ->
+ ok.
+
+%%
+%% Helpers
+%%
+
+has_any_peer_nodes_configured() ->
+ case application:get_env(rabbit, cluster_nodes, []) of
+ {[], _NodeType} ->
+ false;
+ {Nodes, _NodeType} when is_list(Nodes) ->
+ true;
+ [] ->
+ false;
+ Nodes when is_list(Nodes) ->
+ true
+ end.
diff --git a/deps/rabbit/src/rabbit_peer_discovery_dns.erl b/deps/rabbit/src/rabbit_peer_discovery_dns.erl
new file mode 100644
index 0000000000..6e343a6e2d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_peer_discovery_dns.erl
@@ -0,0 +1,113 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_dns).
+-behaviour(rabbit_peer_discovery_backend).
+
+-include("rabbit.hrl").
+
+-export([list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1]).
+%% for tests
+-export([discover_nodes/2, discover_hostnames/2]).
+
+%%
+%% API
+%%
+
+-spec list_nodes() ->
+ {ok, {Nodes :: [node()], rabbit_types:node_type()}}.
+
+list_nodes() ->
+ case application:get_env(rabbit, cluster_formation) of
+ undefined ->
+ {ok, {[], disc}};
+ {ok, ClusterFormation} ->
+ case proplists:get_value(peer_discovery_dns, ClusterFormation) of
+ undefined ->
+ rabbit_log:warning("Peer discovery backend is set to ~s "
+ "but final config does not contain rabbit.cluster_formation.peer_discovery_dns. "
+ "Cannot discover any nodes because seed hostname is not configured!",
+ [?MODULE]),
+ {ok, {[], disc}};
+ Proplist ->
+ Hostname = rabbit_data_coercion:to_list(proplists:get_value(hostname, Proplist)),
+
+ {ok, {discover_nodes(Hostname, net_kernel:longnames()), rabbit_peer_discovery:node_type()}}
+ end
+ end.
+
+
+-spec supports_registration() -> boolean().
+
+supports_registration() ->
+ false.
+
+
+-spec register() -> ok.
+
+register() ->
+ ok.
+
+-spec unregister() -> ok.
+
+unregister() ->
+ ok.
+
+-spec post_registration() -> ok.
+
+post_registration() ->
+ ok.
+
+-spec lock(Node :: atom()) -> not_supported.
+
+lock(_Node) ->
+ not_supported.
+
+-spec unlock(Data :: term()) -> ok.
+
+unlock(_Data) ->
+ ok.
+
+%%
+%% Implementation
+%%
+
+discover_nodes(SeedHostname, LongNamesUsed) ->
+ [list_to_atom(rabbit_peer_discovery:append_node_prefix(H)) ||
+ H <- discover_hostnames(SeedHostname, LongNamesUsed)].
+
+discover_hostnames(SeedHostname, LongNamesUsed) ->
+ lookup(SeedHostname, LongNamesUsed, ipv4) ++
+ lookup(SeedHostname, LongNamesUsed, ipv6).
+
+decode_record(ipv4) ->
+ a;
+decode_record(ipv6) ->
+ aaaa.
+
+lookup(SeedHostname, LongNamesUsed, IPv) ->
+ IPs = inet_res:lookup(SeedHostname, in, decode_record(IPv)),
+ rabbit_log:info("Addresses discovered via ~s records of ~s: ~s",
+ [string:to_upper(atom_to_list(decode_record(IPv))),
+ SeedHostname,
+ string:join([inet_parse:ntoa(IP) || IP <- IPs], ", ")]),
+ Hosts = [extract_host(inet:gethostbyaddr(A), LongNamesUsed, A) ||
+ A <- IPs],
+ lists:filter(fun(E) -> E =/= error end, Hosts).
+
+
+%% long node names are used
+extract_host({ok, {hostent, FQDN, _, _, _, _}}, true, _Address) ->
+ FQDN;
+%% short node names are used
+extract_host({ok, {hostent, FQDN, _, _, _, _}}, false, _Address) ->
+ lists:nth(1, string:tokens(FQDN, "."));
+extract_host({error, Error}, _, Address) ->
+ rabbit_log:error("Reverse DNS lookup for address ~s failed: ~p",
+ [inet_parse:ntoa(Address), Error]),
+ error.
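+%% For illustration, with a (hypothetical) seed hostname
+%% "discovery.eng.example.local" whose A/AAAA records resolve back to hosts
+%% "rabbit1.eng.example.local" and "rabbit2.eng.example.local",
+%% discover_nodes/2 returns
+%%   ['rabbit@rabbit1.eng.example.local', 'rabbit@rabbit2.eng.example.local']
+%% when long node names are used, or ['rabbit@rabbit1', 'rabbit@rabbit2']
+%% with short names (the "rabbit" prefix comes from the local node name).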
diff --git a/deps/rabbit/src/rabbit_plugins.erl b/deps/rabbit/src/rabbit_plugins.erl
new file mode 100644
index 0000000000..5697ffc29a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_plugins.erl
@@ -0,0 +1,699 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_plugins).
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("stdlib/include/zip.hrl").
+
+-export([setup/0, active/0, read_enabled/1, list/1, list/2, dependencies/3, running_plugins/0]).
+-export([ensure/1]).
+-export([validate_plugins/1, format_invalid_plugins/1]).
+-export([is_strictly_plugin/1, strictly_plugins/2, strictly_plugins/1]).
+-export([plugins_dir/0, plugin_names/1, plugins_expand_dir/0, enabled_plugins_file/0]).
+
+%% Exported for testing purposes.
+-export([is_version_supported/2, validate_plugins/2]).
+%%----------------------------------------------------------------------------
+
+-type plugin_name() :: atom().
+
+%%----------------------------------------------------------------------------
+
+-spec ensure(string()) -> {'ok', [atom()], [atom()]} | {error, any()}.
+
+ensure(FileJustChanged) ->
+ case rabbit:is_running() of
+ true -> ensure1(FileJustChanged);
+ false -> {error, rabbit_not_running}
+ end.
+
+ensure1(FileJustChanged0) ->
+ {ok, OurFile0} = application:get_env(rabbit, enabled_plugins_file),
+ FileJustChanged = filename:nativename(FileJustChanged0),
+ OurFile = filename:nativename(OurFile0),
+ case OurFile of
+ FileJustChanged ->
+ Enabled = read_enabled(OurFile),
+ Wanted = prepare_plugins(Enabled),
+ Current = active(),
+ Start = Wanted -- Current,
+ Stop = Current -- Wanted,
+ rabbit:start_apps(Start),
+ %% We need sync_notify here since mgmt will attempt to look at all
+ %% the modules for the disabled plugins - if they are unloaded
+ %% that won't work.
+ ok = rabbit_event:sync_notify(plugins_changed, [{enabled, Start},
+ {disabled, Stop}]),
+ %% The app_utils module stops the apps in reverse order, so we should
+ %% pass them here in dependency order.
+ rabbit:stop_apps(lists:reverse(Stop)),
+ clean_plugins(Stop),
+ case {Start, Stop} of
+ {[], []} ->
+ ok;
+ {[], _} ->
+ rabbit_log:info("Plugins changed; disabled ~p~n",
+ [Stop]);
+ {_, []} ->
+ rabbit_log:info("Plugins changed; enabled ~p~n",
+ [Start]);
+ {_, _} ->
+ rabbit_log:info("Plugins changed; enabled ~p, disabled ~p~n",
+ [Start, Stop])
+ end,
+ {ok, Start, Stop};
+ _ ->
+ {error, {enabled_plugins_mismatch, FileJustChanged, OurFile}}
+ end.
+
+-spec plugins_expand_dir() -> file:filename().
+plugins_expand_dir() ->
+ case application:get_env(rabbit, plugins_expand_dir) of
+ {ok, ExpandDir} ->
+ ExpandDir;
+ _ ->
+ filename:join([rabbit_mnesia:dir(), "plugins_expand_dir"])
+ end.
+
+-spec plugins_dir() -> file:filename().
+plugins_dir() ->
+ case application:get_env(rabbit, plugins_dir) of
+ {ok, PluginsDistDir} ->
+ PluginsDistDir;
+ _ ->
+ filename:join([rabbit_mnesia:dir(), "plugins_dir_stub"])
+ end.
+
+-spec enabled_plugins_file() -> file:filename().
+enabled_plugins_file() ->
+ case application:get_env(rabbit, enabled_plugins_file) of
+ {ok, Val} ->
+ Val;
+ _ ->
+ filename:join([rabbit_mnesia:dir(), "enabled_plugins"])
+ end.
+
+-spec enabled_plugins() -> [atom()].
+enabled_plugins() ->
+ case application:get_env(rabbit, enabled_plugins_file) of
+ {ok, EnabledFile} ->
+ read_enabled(EnabledFile);
+ _ ->
+ []
+ end.
+
+%% @doc Prepares the file system and installs all enabled plugins.
+
+-spec setup() -> [plugin_name()].
+
+setup() ->
+ ExpandDir = plugins_expand_dir(),
+ %% Eliminate the contents of the destination directory
+ case delete_recursively(ExpandDir) of
+ ok -> ok;
+ {error, E1} -> throw({error, {cannot_delete_plugins_expand_dir,
+ [ExpandDir, E1]}})
+ end,
+ Enabled = enabled_plugins(),
+ prepare_plugins(Enabled).
+
+%% @doc Lists the plugins which are currently running.
+
+-spec active() -> [plugin_name()].
+
+active() ->
+ InstalledPlugins = plugin_names(list(plugins_dir())),
+ [App || {App, _, _} <- rabbit_misc:which_applications(),
+ lists:member(App, InstalledPlugins)].
+
+%% @doc Get the list of plugins which are ready to be enabled.
+
+-spec list(string()) -> [#plugin{}].
+
+list(PluginsPath) ->
+ list(PluginsPath, false).
+
+-spec list(string(), boolean()) -> [#plugin{}].
+
+list(PluginsPath, IncludeRequiredDeps) ->
+ {AllPlugins, LoadingProblems} = discover_plugins(split_path(PluginsPath)),
+ {UniquePlugins, DuplicateProblems} = remove_duplicate_plugins(AllPlugins),
+ Plugins1 = maybe_keep_required_deps(IncludeRequiredDeps, UniquePlugins),
+ Plugins2 = remove_plugins(Plugins1),
+ maybe_report_plugin_loading_problems(LoadingProblems ++ DuplicateProblems),
+ ensure_dependencies(Plugins2).
+
+%% @doc Read the list of enabled plugins from the supplied term file.
+
+-spec read_enabled(file:filename()) -> [plugin_name()].
+
+read_enabled(PluginsFile) ->
+ case rabbit_file:read_term_file(PluginsFile) of
+ {ok, [Plugins]} -> Plugins;
+ {ok, []} -> [];
+ {ok, [_|_]} -> throw({error, {malformed_enabled_plugins_file,
+ PluginsFile}});
+ {error, enoent} -> [];
+ {error, Reason} -> throw({error, {cannot_read_enabled_plugins_file,
+ PluginsFile, Reason}})
+ end.
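+%% For illustration, the enabled plugins file is a standard Erlang term file
+%% containing a single list, e.g. (hypothetical contents):
+%%   [rabbitmq_management,rabbitmq_shovel].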
+
+%% @doc Calculate the dependency graph from <i>Sources</i>.
+%% When Reverse =:= true the bottom/leaf level applications are returned in
+%% the resulting list, otherwise they're skipped.
+
+-spec dependencies(boolean(), [plugin_name()], [#plugin{}]) ->
+ [plugin_name()].
+
+dependencies(Reverse, Sources, AllPlugins) ->
+ {ok, G} = rabbit_misc:build_acyclic_graph(
+ fun ({App, _Deps}) -> [{App, App}] end,
+ fun ({App, Deps}) -> [{App, Dep} || Dep <- Deps] end,
+ [{Name, Deps} || #plugin{name = Name,
+ dependencies = Deps} <- AllPlugins]),
+ Dests = case Reverse of
+ false -> digraph_utils:reachable(Sources, G);
+ true -> digraph_utils:reaching(Sources, G)
+ end,
+ OrderedDests = digraph_utils:postorder(digraph_utils:subgraph(G, Dests)),
+ true = digraph:delete(G),
+ OrderedDests.
+
+%% Filter real plugins from application dependencies
+
+-spec is_strictly_plugin(#plugin{}) -> boolean().
+
+is_strictly_plugin(#plugin{extra_dependencies = ExtraDeps}) ->
+ lists:member(rabbit, ExtraDeps).
+
+-spec strictly_plugins([plugin_name()], [#plugin{}]) -> [plugin_name()].
+
+strictly_plugins(Plugins, AllPlugins) ->
+ lists:filter(
+ fun(Name) ->
+ is_strictly_plugin(lists:keyfind(Name, #plugin.name, AllPlugins))
+ end, Plugins).
+
+-spec strictly_plugins([plugin_name()]) -> [plugin_name()].
+
+strictly_plugins(Plugins) ->
+ AllPlugins = list(plugins_dir()),
+ lists:filter(
+ fun(Name) ->
+ is_strictly_plugin(lists:keyfind(Name, #plugin.name, AllPlugins))
+ end, Plugins).
+
+%% For a few known cases, the application shipped with Erlang/OTP can be
+%% trusted. In those cases, it takes precedence over the plugin of the
+%% same name.
+is_plugin_provided_by_otp(#plugin{name = eldap}) ->
+ %% eldap was added to Erlang/OTP R15B01 (ERTS 5.9.1). In this case,
+ %% we prefer this version to the plugin.
+ rabbit_misc:version_compare(erlang:system_info(version), "5.9.1", gte);
+is_plugin_provided_by_otp(_) ->
+ false.
+
+%% Make sure we don't list OTP apps in here, and also that we detect
+%% missing dependencies.
+ensure_dependencies(Plugins) ->
+ Names = plugin_names(Plugins),
+ NotThere = [Dep || #plugin{dependencies = Deps} <- Plugins,
+ Dep <- Deps,
+ not lists:member(Dep, Names)],
+ {OTP, Missing} = lists:partition(fun is_loadable/1, lists:usort(NotThere)),
+ case Missing of
+ [] -> ok;
+ _ -> Blame = [Name || #plugin{name = Name,
+ dependencies = Deps} <- Plugins,
+ lists:any(fun (Dep) ->
+ lists:member(Dep, Missing)
+ end, Deps)],
+ throw({error, {missing_dependencies, Missing, Blame}})
+ end,
+ [P#plugin{dependencies = Deps -- OTP,
+ extra_dependencies = Deps -- (Deps -- OTP)}
+ || P = #plugin{dependencies = Deps} <- Plugins].
+
+is_loadable(App) ->
+ case application:load(App) of
+ {error, {already_loaded, _}} -> true;
+ ok -> application:unload(App),
+ true;
+ _ -> false
+ end.
+
+
+%% List running plugins along with their version.
+-spec running_plugins() -> {ok, [{atom(), Vsn :: string()}]}.
+running_plugins() ->
+ ActivePlugins = active(),
+ {ok, [{App, Vsn} || {App, _ , Vsn} <- rabbit_misc:which_applications(), lists:member(App, ActivePlugins)]}.
+
+%%----------------------------------------------------------------------------
+
+prepare_plugins(Enabled) ->
+ ExpandDir = plugins_expand_dir(),
+ AllPlugins = list(plugins_dir()),
+ Wanted = dependencies(false, Enabled, AllPlugins),
+ WantedPlugins = lookup_plugins(Wanted, AllPlugins),
+ {ValidPlugins, Problems} = validate_plugins(WantedPlugins),
+ maybe_warn_about_invalid_plugins(Problems),
+ case filelib:ensure_dir(ExpandDir ++ "/") of
+ ok -> ok;
+ {error, E2} -> throw({error, {cannot_create_plugins_expand_dir,
+ [ExpandDir, E2]}})
+ end,
+ [prepare_plugin(Plugin, ExpandDir) || Plugin <- ValidPlugins],
+ Wanted.
+
+maybe_warn_about_invalid_plugins([]) ->
+ ok;
+maybe_warn_about_invalid_plugins(InvalidPlugins) ->
+ %% TODO: error message formatting
+ rabbit_log:warning(format_invalid_plugins(InvalidPlugins)).
+
+
+format_invalid_plugins(InvalidPlugins) ->
+ lists:flatten(["Failed to enable some plugins: \r\n"
+ | [format_invalid_plugin(Plugin)
+ || Plugin <- InvalidPlugins]]).
+
+format_invalid_plugin({Name, Errors}) ->
+ [io_lib:format(" ~p:~n", [Name])
+ | [format_invalid_plugin_error(Err) || Err <- Errors]].
+
+format_invalid_plugin_error({missing_dependency, Dep}) ->
+ io_lib:format(" Dependency is missing or invalid: ~p~n", [Dep]);
+%% a plugin doesn't support the effective broker version
+format_invalid_plugin_error({broker_version_mismatch, Version, Required}) ->
+ io_lib:format(" Plugin doesn't support current server version."
+ " Actual broker version: ~p, supported by the plugin: ~p~n",
+ [Version, format_required_versions(Required)]);
+%% one of dependencies of a plugin doesn't match its version requirements
+format_invalid_plugin_error({{dependency_version_mismatch, Version, Required}, Name}) ->
+ io_lib:format(" Version '~p' of dependency '~p' is unsupported."
+ " Version ranges supported by the plugin: ~p~n",
+ [Version, Name, Required]);
+format_invalid_plugin_error(Err) ->
+ io_lib:format(" Unknown error ~p~n", [Err]).
+
+format_required_versions(Versions) ->
+ lists:map(fun(V) ->
+                  case re:run(V, "^[0-9]*\\.[0-9]*\\.", [{capture, all, list}]) of
+ {match, [Sub]} ->
+ lists:flatten(io_lib:format("~s-~sx", [V, Sub]));
+ _ ->
+ V
+ end
+ end, Versions).
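+%% For illustration, a required version of "3.8.1" is rendered as
+%% "3.8.1-3.8.x", hinting that any release in the same minor series is
+%% accepted; values that do not match the pattern are returned unchanged.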
+
+validate_plugins(Plugins) ->
+ application:load(rabbit),
+    RabbitVersion = case application:get_key(rabbit, vsn) of
+                        undefined -> "0.0.0";
+                        {ok, Val} -> Val
+                    end,
+ validate_plugins(Plugins, RabbitVersion).
+
+validate_plugins(Plugins, BrokerVersion) ->
+ lists:foldl(
+ fun(#plugin{name = Name,
+ broker_version_requirements = BrokerVersionReqs,
+ dependency_version_requirements = DepsVersions} = Plugin,
+ {Plugins0, Errors}) ->
+ case is_version_supported(BrokerVersion, BrokerVersionReqs) of
+ true ->
+ case BrokerVersion of
+ "0.0.0" ->
+ rabbit_log:warning(
+ "Running development version of the broker."
+ " Requirement ~p for plugin ~p is ignored.",
+ [BrokerVersionReqs, Name]);
+ _ -> ok
+ end,
+ case check_plugins_versions(Name, Plugins0, DepsVersions) of
+ ok -> {[Plugin | Plugins0], Errors};
+ {error, Err} -> {Plugins0, [{Name, Err} | Errors]}
+ end;
+ false ->
+ Error = [{broker_version_mismatch, BrokerVersion, BrokerVersionReqs}],
+ {Plugins0, [{Name, Error} | Errors]}
+ end
+ end,
+ {[],[]},
+ Plugins).
+
+check_plugins_versions(PluginName, AllPlugins, RequiredVersions) ->
+ ExistingVersions = [{Name, Vsn}
+ || #plugin{name = Name, version = Vsn} <- AllPlugins],
+ Problems = lists:foldl(
+ fun({Name, Versions}, Acc) ->
+ case proplists:get_value(Name, ExistingVersions) of
+ undefined -> [{missing_dependency, Name} | Acc];
+ Version ->
+ case is_version_supported(Version, Versions) of
+ true ->
+ case Version of
+ "" ->
+ rabbit_log:warning(
+ "~p plugin version is not defined."
+ " Requirement ~p for plugin ~p is ignored",
+                                                 [Name, Versions, PluginName]);
+ _ -> ok
+ end,
+ Acc;
+ false ->
+ [{{dependency_version_mismatch, Version, Versions}, Name} | Acc]
+ end
+ end
+ end,
+ [],
+ RequiredVersions),
+ case Problems of
+ [] -> ok;
+ _ -> {error, Problems}
+ end.
+
+is_version_supported("", _) -> true;
+is_version_supported("0.0.0", _) -> true;
+is_version_supported(_Version, []) -> true;
+is_version_supported(VersionFull, ExpectedVersions) ->
+ %% Pre-release version should be supported in plugins,
+ %% therefore preview part should be removed
+ Version = remove_version_preview_part(VersionFull),
+ case lists:any(fun(ExpectedVersion) ->
+ rabbit_misc:strict_version_minor_equivalent(ExpectedVersion,
+ Version)
+ andalso
+ rabbit_misc:version_compare(ExpectedVersion, Version, lte)
+ end,
+ ExpectedVersions) of
+ true -> true;
+ false -> false
+ end.
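+%% For illustration (hypothetical versions): is_version_supported("3.8.5",
+%% ["3.7.0", "3.8.0"]) returns true because "3.8.0" belongs to the same
+%% minor series and is not greater than "3.8.5", whereas
+%% is_version_supported("3.9.0", ["3.8.0"]) returns false.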
+
+remove_version_preview_part(Version) ->
+ {Ver, _Preview} = rabbit_semver:parse(Version),
+ iolist_to_binary(rabbit_semver:format({Ver, {[], []}})).
+
+clean_plugins(Plugins) ->
+ ExpandDir = plugins_expand_dir(),
+ [clean_plugin(Plugin, ExpandDir) || Plugin <- Plugins].
+
+clean_plugin(Plugin, ExpandDir) ->
+ {ok, Mods} = application:get_key(Plugin, modules),
+ application:unload(Plugin),
+ [begin
+ code:soft_purge(Mod),
+ code:delete(Mod),
+ false = code:is_loaded(Mod)
+ end || Mod <- Mods],
+ delete_recursively(rabbit_misc:format("~s/~s", [ExpandDir, Plugin])).
+
+prepare_dir_plugin(PluginAppDescPath) ->
+ PluginEbinDir = filename:dirname(PluginAppDescPath),
+ Plugin = filename:basename(PluginAppDescPath, ".app"),
+ code:add_patha(PluginEbinDir),
+ case filelib:wildcard(PluginEbinDir++ "/*.beam") of
+ [] ->
+ ok;
+ [BeamPath | _] ->
+ Module = list_to_atom(filename:basename(BeamPath, ".beam")),
+ case code:ensure_loaded(Module) of
+ {module, _} ->
+ ok;
+ {error, badfile} ->
+ rabbit_log:error("Failed to enable plugin \"~s\": "
+ "it may have been built with an "
+ "incompatible (more recent?) "
+ "version of Erlang~n", [Plugin]),
+ throw({plugin_built_with_incompatible_erlang, Plugin});
+ Error ->
+ throw({plugin_module_unloadable, Plugin, Error})
+ end
+ end.
+
+%%----------------------------------------------------------------------------
+
+delete_recursively(Fn) ->
+ case rabbit_file:recursive_delete([Fn]) of
+ ok -> ok;
+ {error, {Path, E}} -> {error, {cannot_delete, Path, E}}
+ end.
+
+find_unzipped_app_file(ExpandDir, Files) ->
+ StripComponents = length(filename:split(ExpandDir)),
+ [ X || X <- Files,
+ [_AppName, "ebin", MaybeAppFile] <-
+ [lists:nthtail(StripComponents, filename:split(X))],
+ lists:suffix(".app", MaybeAppFile)
+ ].
+
+prepare_plugin(#plugin{type = ez, name = Name, location = Location}, ExpandDir) ->
+ case zip:unzip(Location, [{cwd, ExpandDir}]) of
+ {ok, Files} ->
+ case find_unzipped_app_file(ExpandDir, Files) of
+ [PluginAppDescPath|_] ->
+ prepare_dir_plugin(PluginAppDescPath);
+ _ ->
+ rabbit_log:error("Plugin archive '~s' doesn't contain an .app file~n", [Location]),
+ throw({app_file_missing, Name, Location})
+ end;
+ {error, Reason} ->
+ rabbit_log:error("Could not unzip plugin archive '~s': ~p~n", [Location, Reason]),
+ throw({failed_to_unzip_plugin, Name, Location, Reason})
+ end;
+prepare_plugin(#plugin{type = dir, location = Location, name = Name},
+ _ExpandDir) ->
+ case filelib:wildcard(Location ++ "/ebin/*.app") of
+ [PluginAppDescPath|_] ->
+ prepare_dir_plugin(PluginAppDescPath);
+ _ ->
+ rabbit_log:error("Plugin directory '~s' doesn't contain an .app file~n", [Location]),
+ throw({app_file_missing, Name, Location})
+ end.
+
+plugin_info({ez, EZ}) ->
+ case read_app_file(EZ) of
+ {application, Name, Props} -> mkplugin(Name, Props, ez, EZ);
+ {error, Reason} -> {error, EZ, Reason}
+ end;
+plugin_info({app, App}) ->
+ case rabbit_file:read_term_file(App) of
+ {ok, [{application, Name, Props}]} ->
+ mkplugin(Name, Props, dir,
+ filename:absname(
+ filename:dirname(filename:dirname(App))));
+ {error, Reason} ->
+ {error, App, {invalid_app, Reason}}
+ end.
+
+mkplugin(Name, Props, Type, Location) ->
+ Version = proplists:get_value(vsn, Props, "0"),
+ Description = proplists:get_value(description, Props, ""),
+ Dependencies = proplists:get_value(applications, Props, []),
+ BrokerVersions = proplists:get_value(broker_version_requirements, Props, []),
+ DepsVersions = proplists:get_value(dependency_version_requirements, Props, []),
+ #plugin{name = Name, version = Version, description = Description,
+ dependencies = Dependencies, location = Location, type = Type,
+ broker_version_requirements = BrokerVersions,
+ dependency_version_requirements = DepsVersions}.
+
+read_app_file(EZ) ->
+ case zip:list_dir(EZ) of
+ {ok, [_|ZippedFiles]} ->
+ case find_app_files(ZippedFiles) of
+ [AppPath|_] ->
+ {ok, [{AppPath, AppFile}]} =
+ zip:extract(EZ, [{file_list, [AppPath]}, memory]),
+ parse_binary(AppFile);
+ [] ->
+ {error, no_app_file}
+ end;
+ {error, Reason} ->
+ {error, {invalid_ez, Reason}}
+ end.
+
+find_app_files(ZippedFiles) ->
+    {ok, RE} = re:compile("^.*/ebin/.*\\.app$"),
+ [Path || {zip_file, Path, _, _, _, _} <- ZippedFiles,
+ re:run(Path, RE, [{capture, none}]) =:= match].
+
+parse_binary(Bin) ->
+ try
+ {ok, Ts, _} = erl_scan:string(binary_to_list(Bin)),
+ {ok, Term} = erl_parse:parse_term(Ts),
+ Term
+ catch
+ Err -> {error, {invalid_app, Err}}
+ end.
+
+plugin_names(Plugins) ->
+ [Name || #plugin{name = Name} <- Plugins].
+
+lookup_plugins(Names, AllPlugins) ->
+ %% Preserve order of Names
+ lists:map(
+ fun(Name) ->
+ lists:keyfind(Name, #plugin.name, AllPlugins)
+ end,
+ Names).
+
+%% Split PATH-like value into its components.
+split_path(PathString) ->
+ Delimiters = case os:type() of
+ {unix, _} -> ":";
+ {win32, _} -> ";"
+ end,
+ string:tokens(PathString, Delimiters).
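+%% For illustration, "/usr/lib/rabbitmq/plugins:/opt/plugins" (a hypothetical
+%% value) is split into ["/usr/lib/rabbitmq/plugins", "/opt/plugins"] on
+%% Unix-like systems; Windows uses ";" as the delimiter.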
+
+%% Search for files using glob in a given dir. Returns full filenames of those files.
+full_path_wildcard(Glob, Dir) ->
+ [filename:join([Dir, File]) || File <- filelib:wildcard(Glob, Dir)].
+
+%% Returns a list of all .ez files in a given set of directories.
+list_ezs([]) ->
+ [];
+list_ezs([Dir|Rest]) ->
+ [{ez, EZ} || EZ <- full_path_wildcard("*.ez", Dir)] ++ list_ezs(Rest).
+
+%% Returns list of all files that look like OTP applications in a
+%% given set of directories.
+list_free_apps([]) ->
+ [];
+list_free_apps([Dir|Rest]) ->
+ [{app, App} || App <- full_path_wildcard("*/ebin/*.app", Dir)]
+ ++ list_free_apps(Rest).
+
+compare_by_name_and_version(#plugin{name = Name, version = VersionA},
+ #plugin{name = Name, version = VersionB}) ->
+ rabbit_semver:lte(VersionA, VersionB);
+compare_by_name_and_version(#plugin{name = NameA},
+ #plugin{name = NameB}) ->
+ NameA =< NameB.
+
+-spec discover_plugins([Directory]) -> {[#plugin{}], [Problem]} when
+ Directory :: file:name(),
+ Problem :: {file:name(), term()}.
+discover_plugins(PluginsDirs) ->
+ EZs = list_ezs(PluginsDirs),
+ FreeApps = list_free_apps(PluginsDirs),
+ read_plugins_info(EZs ++ FreeApps, {[], []}).
+
+read_plugins_info([], Acc) ->
+ Acc;
+read_plugins_info([Path|Paths], {Plugins, Problems}) ->
+ case plugin_info(Path) of
+ #plugin{} = Plugin ->
+ read_plugins_info(Paths, {[Plugin|Plugins], Problems});
+ {error, Location, Reason} ->
+ read_plugins_info(Paths, {Plugins, [{Location, Reason}|Problems]})
+ end.
+
+remove_duplicate_plugins(Plugins) ->
+ %% Reverse order ensures that if there are several versions of the
+ %% same plugin, the most recent one comes first.
+ Sorted = lists:reverse(
+ lists:sort(fun compare_by_name_and_version/2, Plugins)),
+ remove_duplicate_plugins(Sorted, {[], []}).
+
+remove_duplicate_plugins([], Acc) ->
+ Acc;
+remove_duplicate_plugins([Best = #plugin{name = Name}, Offender = #plugin{name = Name} | Rest],
+ {Plugins0, Problems0}) ->
+ Problems1 = [{Offender#plugin.location, duplicate_plugin}|Problems0],
+ remove_duplicate_plugins([Best|Rest], {Plugins0, Problems1});
+remove_duplicate_plugins([Plugin|Rest], {Plugins0, Problems0}) ->
+ Plugins1 = [Plugin|Plugins0],
+ remove_duplicate_plugins(Rest, {Plugins1, Problems0}).
+
+maybe_keep_required_deps(true, Plugins) ->
+ Plugins;
+maybe_keep_required_deps(false, Plugins) ->
+ RabbitDeps = list_all_deps([rabbit]),
+ lists:filter(fun
+ (#plugin{name = Name}) ->
+ not lists:member(Name, RabbitDeps);
+ (Name) when is_atom(Name) ->
+ not lists:member(Name, RabbitDeps)
+ end,
+ Plugins).
+
+list_all_deps(Applications) ->
+ list_all_deps(Applications, []).
+
+list_all_deps([Application | Applications], Deps) ->
+ %% We load the application to be sure we can get the "applications" key.
+ %% This is required for rabbitmq-plugins for instance.
+ application:load(Application),
+ NewDeps = [Application | Deps],
+ case application:get_key(Application, applications) of
+ {ok, ApplicationDeps} ->
+ RemainingApplications0 = ApplicationDeps ++ Applications,
+ RemainingApplications = RemainingApplications0 -- NewDeps,
+ list_all_deps(RemainingApplications, NewDeps);
+ undefined ->
+ list_all_deps(Applications, NewDeps)
+ end;
+list_all_deps([], Deps) ->
+ Deps.
+
+remove_plugins(Plugins) ->
+ %% We want to filter out all Erlang applications in the plugins
+    %% directories which are not actual RabbitMQ plugins.
+ %%
+ %% A RabbitMQ plugin must depend on `rabbit`. We also want to keep
+ %% all applications they depend on, except Erlang/OTP applications.
+ %% In the end, we will skip:
+ %% * Erlang/OTP applications
+ %% * All applications which do not depend on `rabbit` and which
+ %% are not direct or indirect dependencies of plugins.
+ ActualPlugins = [Plugin
+ || #plugin{dependencies = Deps} = Plugin <- Plugins,
+ lists:member(rabbit, Deps)],
+ %% As said above, we want to keep all non-plugins which are
+ %% dependencies of plugins.
+ PluginDeps = lists:usort(
+ lists:flatten(
+ [resolve_deps(Plugins, Plugin)
+ || Plugin <- ActualPlugins])),
+ lists:filter(
+ fun(#plugin{name = Name} = Plugin) ->
+ IsOTPApp = is_plugin_provided_by_otp(Plugin),
+ IsAPlugin =
+ lists:member(Plugin, ActualPlugins) orelse
+ lists:member(Name, PluginDeps),
+ if
+ IsOTPApp ->
+ rabbit_log:debug(
+ "Plugins discovery: "
+ "ignoring ~s, Erlang/OTP application",
+ [Name]);
+ not IsAPlugin ->
+ rabbit_log:debug(
+ "Plugins discovery: "
+ "ignoring ~s, not a RabbitMQ plugin",
+ [Name]);
+ true ->
+ ok
+ end,
+ not (IsOTPApp orelse not IsAPlugin)
+ end, Plugins).
+
+resolve_deps(Plugins, #plugin{dependencies = Deps}) ->
+ IndirectDeps = [case lists:keyfind(Dep, #plugin.name, Plugins) of
+ false -> [];
+ DepPlugin -> resolve_deps(Plugins, DepPlugin)
+ end
+ || Dep <- Deps],
+ Deps ++ IndirectDeps.
+
+maybe_report_plugin_loading_problems([]) ->
+ ok;
+maybe_report_plugin_loading_problems(Problems) ->
+ io:format(standard_error,
+ "Problem reading some plugins: ~p~n",
+ [Problems]).
diff --git a/deps/rabbit/src/rabbit_policies.erl b/deps/rabbit/src/rabbit_policies.erl
new file mode 100644
index 0000000000..54e4d2c03e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_policies.erl
@@ -0,0 +1,179 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_policies).
+
+%% Provides built-in policy parameter
+%% validation and merge strategy functions.
+
+-behaviour(rabbit_policy_validator).
+-behaviour(rabbit_policy_merge_strategy).
+
+-include("rabbit.hrl").
+
+-export([register/0, validate_policy/1, merge_policy_value/3]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "internal policies"},
+ {mfa, {rabbit_policies, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+register() ->
+ %% Note: there are more validators registered from other modules,
+ %% such as rabbit_mirror_queue_misc
+ [rabbit_registry:register(Class, Name, ?MODULE) ||
+ {Class, Name} <- [{policy_validator, <<"alternate-exchange">>},
+ {policy_validator, <<"dead-letter-exchange">>},
+ {policy_validator, <<"dead-letter-routing-key">>},
+ {policy_validator, <<"message-ttl">>},
+ {policy_validator, <<"expires">>},
+ {policy_validator, <<"max-length">>},
+ {policy_validator, <<"max-length-bytes">>},
+ {policy_validator, <<"max-in-memory-length">>},
+ {policy_validator, <<"max-in-memory-bytes">>},
+ {policy_validator, <<"queue-mode">>},
+ {policy_validator, <<"overflow">>},
+ {policy_validator, <<"delivery-limit">>},
+ {policy_validator, <<"max-age">>},
+ {policy_validator, <<"max-segment-size">>},
+ {policy_validator, <<"queue-leader-locator">>},
+ {policy_validator, <<"initial-cluster-size">>},
+ {operator_policy_validator, <<"expires">>},
+ {operator_policy_validator, <<"message-ttl">>},
+ {operator_policy_validator, <<"max-length">>},
+ {operator_policy_validator, <<"max-length-bytes">>},
+ {operator_policy_validator, <<"max-in-memory-length">>},
+ {operator_policy_validator, <<"max-in-memory-bytes">>},
+ {operator_policy_validator, <<"delivery-limit">>},
+ {policy_merge_strategy, <<"expires">>},
+ {policy_merge_strategy, <<"message-ttl">>},
+ {policy_merge_strategy, <<"max-length">>},
+ {policy_merge_strategy, <<"max-length-bytes">>},
+ {policy_merge_strategy, <<"max-in-memory-length">>},
+ {policy_merge_strategy, <<"max-in-memory-bytes">>},
+ {policy_merge_strategy, <<"delivery-limit">>}]],
+ ok.
+
+-spec validate_policy([{binary(), term()}]) -> rabbit_policy_validator:validate_results().
+
+validate_policy(Terms) ->
+ lists:foldl(fun ({Key, Value}, ok) -> validate_policy0(Key, Value);
+ (_, Error) -> Error
+ end, ok, Terms).
+
+validate_policy0(<<"alternate-exchange">>, Value)
+ when is_binary(Value) ->
+ ok;
+validate_policy0(<<"alternate-exchange">>, Value) ->
+ {error, "~p is not a valid alternate exchange name", [Value]};
+
+validate_policy0(<<"dead-letter-exchange">>, Value)
+ when is_binary(Value) ->
+ ok;
+validate_policy0(<<"dead-letter-exchange">>, Value) ->
+ {error, "~p is not a valid dead letter exchange name", [Value]};
+
+validate_policy0(<<"dead-letter-routing-key">>, Value)
+ when is_binary(Value) ->
+ ok;
+validate_policy0(<<"dead-letter-routing-key">>, Value) ->
+ {error, "~p is not a valid dead letter routing key", [Value]};
+
+validate_policy0(<<"message-ttl">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"message-ttl">>, Value) ->
+ {error, "~p is not a valid message TTL", [Value]};
+
+validate_policy0(<<"expires">>, Value)
+ when is_integer(Value), Value >= 1 ->
+ ok;
+validate_policy0(<<"expires">>, Value) ->
+ {error, "~p is not a valid queue expiry", [Value]};
+
+validate_policy0(<<"max-length">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"max-length">>, Value) ->
+ {error, "~p is not a valid maximum length", [Value]};
+
+validate_policy0(<<"max-length-bytes">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"max-length-bytes">>, Value) ->
+ {error, "~p is not a valid maximum length in bytes", [Value]};
+
+validate_policy0(<<"max-in-memory-length">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"max-in-memory-length">>, Value) ->
+    {error, "~p is not a valid maximum in-memory length", [Value]};
+
+validate_policy0(<<"max-in-memory-bytes">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"max-in-memory-bytes">>, Value) ->
+ {error, "~p is not a valid maximum memory in bytes", [Value]};
+
+validate_policy0(<<"queue-mode">>, <<"default">>) ->
+ ok;
+validate_policy0(<<"queue-mode">>, <<"lazy">>) ->
+ ok;
+validate_policy0(<<"queue-mode">>, Value) ->
+ {error, "~p is not a valid queue-mode value", [Value]};
+validate_policy0(<<"overflow">>, <<"drop-head">>) ->
+ ok;
+validate_policy0(<<"overflow">>, <<"reject-publish">>) ->
+ ok;
+validate_policy0(<<"overflow">>, <<"reject-publish-dlx">>) ->
+ ok;
+validate_policy0(<<"overflow">>, Value) ->
+ {error, "~p is not a valid overflow value", [Value]};
+
+validate_policy0(<<"delivery-limit">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"delivery-limit">>, Value) ->
+ {error, "~p is not a valid delivery limit", [Value]};
+
+validate_policy0(<<"max-age">>, Value) ->
+ case rabbit_amqqueue:check_max_age(Value) of
+ {error, _} ->
+ {error, "~p is not a valid max age", [Value]};
+ _ ->
+ ok
+ end;
+
+validate_policy0(<<"queue-leader-locator">>, <<"client-local">>) ->
+ ok;
+validate_policy0(<<"queue-leader-locator">>, <<"random">>) ->
+ ok;
+validate_policy0(<<"queue-leader-locator">>, <<"least-leaders">>) ->
+ ok;
+validate_policy0(<<"queue-leader-locator">>, Value) ->
+ {error, "~p is not a valid queue leader locator value", [Value]};
+
+validate_policy0(<<"initial-cluster-size">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"initial-cluster-size">>, Value) ->
+ {error, "~p is not a valid cluster size", [Value]};
+
+validate_policy0(<<"max-segment-size">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"max-segment-size">>, Value) ->
+ {error, "~p is not a valid segment size", [Value]}.
+
+merge_policy_value(<<"message-ttl">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"max-length">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"max-length-bytes">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"max-in-memory-length">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"max-in-memory-bytes">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"expires">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"delivery-limit">>, Val, OpVal) -> min(Val, OpVal).
diff --git a/deps/rabbit/src/rabbit_policy.erl b/deps/rabbit/src/rabbit_policy.erl
new file mode 100644
index 0000000000..44807de97d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_policy.erl
@@ -0,0 +1,557 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_policy).
+
+%% Policies are a way to apply optional arguments ("x-args")
+%% to exchanges and queues in bulk, using name matching.
+%%
+%% Only one policy can apply to a given queue or exchange
+%% at a time. Priorities help determine what policy should
+%% take precedence.
+%%
+%% Policies build on runtime parameters. Policy-driven parameters
+%% are well known and therefore validated.
+%%
+%% See also:
+%%
+%% * rabbit_runtime_parameters
+%% * rabbit_policies
+%% * rabbit_registry
+
+%% TODO specs
+
+-behaviour(rabbit_runtime_parameter).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-export([register/0]).
+-export([invalidate/0, recover/0]).
+-export([name/1, name_op/1, effective_definition/1, merge_operator_definitions/2, get/2, get_arg/3, set/1]).
+-export([validate/5, notify/5, notify_clear/4]).
+-export([parse_set/7, set/7, delete/3, lookup/2, list/0, list/1,
+ list_formatted/1, list_formatted/3, info_keys/0]).
+-export([parse_set_op/7, set_op/7, delete_op/3, lookup_op/2, list_op/0, list_op/1,
+ list_formatted_op/1, list_formatted_op/3]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "policy parameters"},
+ {mfa, {rabbit_policy, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+register() ->
+ rabbit_registry:register(runtime_parameter, <<"policy">>, ?MODULE),
+ rabbit_registry:register(runtime_parameter, <<"operator_policy">>, ?MODULE).
+
+name(Q) when ?is_amqqueue(Q) ->
+ Policy = amqqueue:get_policy(Q),
+ name0(Policy);
+name(#exchange{policy = Policy}) -> name0(Policy).
+
+name_op(Q) when ?is_amqqueue(Q) ->
+ OpPolicy = amqqueue:get_operator_policy(Q),
+ name0(OpPolicy);
+name_op(#exchange{operator_policy = Policy}) -> name0(Policy).
+
+name0(undefined) -> none;
+name0(Policy) -> pget(name, Policy).
+
+effective_definition(Q) when ?is_amqqueue(Q) ->
+ Policy = amqqueue:get_policy(Q),
+ OpPolicy = amqqueue:get_operator_policy(Q),
+ merge_operator_definitions(Policy, OpPolicy);
+effective_definition(#exchange{policy = Policy, operator_policy = OpPolicy}) ->
+ merge_operator_definitions(Policy, OpPolicy).
+
+merge_operator_definitions(undefined, undefined) -> undefined;
+merge_operator_definitions(Policy, undefined) -> pget(definition, Policy);
+merge_operator_definitions(undefined, OpPolicy) -> pget(definition, OpPolicy);
+merge_operator_definitions(Policy, OpPolicy) ->
+ OpDefinition = rabbit_data_coercion:to_map(pget(definition, OpPolicy, [])),
+ Definition = rabbit_data_coercion:to_map(pget(definition, Policy, [])),
+ Keys = maps:keys(Definition),
+ OpKeys = maps:keys(OpDefinition),
+ lists:map(fun(Key) ->
+ case {maps:get(Key, Definition, undefined), maps:get(Key, OpDefinition, undefined)} of
+ {Val, undefined} -> {Key, Val};
+ {undefined, OpVal} -> {Key, OpVal};
+ {Val, OpVal} -> {Key, merge_policy_value(Key, Val, OpVal)}
+ end
+ end,
+ lists:umerge(Keys, OpKeys)).
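+%% For illustration (hypothetical definitions): with a user policy definition
+%% #{<<"max-length">> => 1000} and an operator policy definition
+%% #{<<"max-length">> => 100, <<"expires">> => 60000}, the merged result
+%% contains {<<"max-length">>, 100} (the stricter operator value wins via
+%% merge_policy_value/3) and {<<"expires">>, 60000}.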
+
+set(Q0) when ?is_amqqueue(Q0) ->
+ Name = amqqueue:get_name(Q0),
+ Policy = match(Name),
+ OpPolicy = match_op(Name),
+ Q1 = amqqueue:set_policy(Q0, Policy),
+ Q2 = amqqueue:set_operator_policy(Q1, OpPolicy),
+ Q2;
+set(X = #exchange{name = Name}) ->
+ X#exchange{policy = match(Name), operator_policy = match_op(Name)}.
+
+match(Name = #resource{virtual_host = VHost}) ->
+ match(Name, list(VHost)).
+
+match_op(Name = #resource{virtual_host = VHost}) ->
+ match(Name, list_op(VHost)).
+
+get(Name, Q) when ?is_amqqueue(Q) ->
+ Policy = amqqueue:get_policy(Q),
+ OpPolicy = amqqueue:get_operator_policy(Q),
+ get0(Name, Policy, OpPolicy);
+get(Name, #exchange{policy = Policy, operator_policy = OpPolicy}) ->
+ get0(Name, Policy, OpPolicy);
+
+%% Caution - SLOW.
+get(Name, EntityName = #resource{virtual_host = VHost}) ->
+ get0(Name,
+ match(EntityName, list(VHost)),
+ match(EntityName, list_op(VHost))).
+
+get0(_Name, undefined, undefined) -> undefined;
+get0(Name, undefined, OpPolicy) -> pget(Name, pget(definition, OpPolicy, []));
+get0(Name, Policy, undefined) -> pget(Name, pget(definition, Policy, []));
+get0(Name, Policy, OpPolicy) ->
+ OpDefinition = pget(definition, OpPolicy, []),
+ Definition = pget(definition, Policy, []),
+ case {pget(Name, Definition), pget(Name, OpDefinition)} of
+ {undefined, undefined} -> undefined;
+ {Val, undefined} -> Val;
+ {undefined, Val} -> Val;
+ {Val, OpVal} -> merge_policy_value(Name, Val, OpVal)
+ end.
+
+merge_policy_value(Name, PolicyVal, OpVal) ->
+ case policy_merge_strategy(Name) of
+ {ok, Module} -> Module:merge_policy_value(Name, PolicyVal, OpVal);
+ {error, not_found} -> rabbit_policies:merge_policy_value(Name, PolicyVal, OpVal)
+ end.
+
+policy_merge_strategy(Name) ->
+ case rabbit_registry:binary_to_type(rabbit_data_coercion:to_binary(Name)) of
+ {error, not_found} ->
+ {error, not_found};
+ T ->
+ rabbit_registry:lookup_module(policy_merge_strategy, T)
+ end.
+
+%% Many heads for optimisation
+get_arg(_AName, _PName, #exchange{arguments = [], policy = undefined}) ->
+ undefined;
+get_arg(_AName, PName, X = #exchange{arguments = []}) ->
+ get(PName, X);
+get_arg(AName, PName, X = #exchange{arguments = Args}) ->
+ case rabbit_misc:table_lookup(Args, AName) of
+ undefined -> get(PName, X);
+ {_Type, Arg} -> Arg
+ end.
+
+%%----------------------------------------------------------------------------
+
+%% Gets called during upgrades - therefore must not assume anything about the
+%% state of Mnesia
+invalidate() ->
+ rabbit_file:write_file(invalid_file(), <<"">>).
+
+recover() ->
+ case rabbit_file:is_file(invalid_file()) of
+ true -> recover0(),
+ rabbit_file:delete(invalid_file());
+ false -> ok
+ end.
+
+%% To get here we have to have just completed an Mnesia upgrade - i.e. we are
+%% the first node starting. So we can rewrite the whole database. Note that
+%% recovery has not yet happened; we must work with the rabbit_durable_<thing>
+%% variants.
+recover0() ->
+ Xs = mnesia:dirty_match_object(rabbit_durable_exchange, #exchange{_ = '_'}),
+ Qs = rabbit_amqqueue:list_with_possible_retry(
+ fun() ->
+ mnesia:dirty_match_object(
+ rabbit_durable_queue, amqqueue:pattern_match_all())
+ end),
+ Policies = list(),
+ OpPolicies = list_op(),
+ [rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ mnesia:write(
+ rabbit_durable_exchange,
+ rabbit_exchange_decorator:set(
+ X#exchange{policy = match(Name, Policies),
+ operator_policy = match(Name, OpPolicies)}),
+ write)
+ end) || X = #exchange{name = Name} <- Xs],
+ [begin
+ QName = amqqueue:get_name(Q0),
+ Policy1 = match(QName, Policies),
+ Q1 = amqqueue:set_policy(Q0, Policy1),
+ OpPolicy1 = match(QName, OpPolicies),
+ Q2 = amqqueue:set_operator_policy(Q1, OpPolicy1),
+ Q3 = rabbit_queue_decorator:set(Q2),
+ ?try_mnesia_tx_or_upgrade_amqqueue_and_retry(
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ mnesia:write(rabbit_durable_queue, Q3, write)
+ end),
+ begin
+ Q4 = amqqueue:upgrade(Q3),
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ mnesia:write(rabbit_durable_queue, Q4, write)
+ end)
+ end)
+ end || Q0 <- Qs],
+ ok.
+
+invalid_file() ->
+ filename:join(rabbit_mnesia:dir(), "policies_are_invalid").
+
+%%----------------------------------------------------------------------------
+
+parse_set_op(VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ parse_set(<<"operator_policy">>, VHost, Name, Pattern, Definition, Priority,
+ ApplyTo, ActingUser).
+
+parse_set(VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ parse_set(<<"policy">>, VHost, Name, Pattern, Definition, Priority, ApplyTo,
+ ActingUser).
+
+parse_set(Type, VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ try rabbit_data_coercion:to_integer(Priority) of
+ Num -> parse_set0(Type, VHost, Name, Pattern, Definition, Num, ApplyTo,
+ ActingUser)
+ catch
+ error:badarg -> {error, "~p priority must be a number", [Priority]}
+ end.
+
+parse_set0(Type, VHost, Name, Pattern, Defn, Priority, ApplyTo, ActingUser) ->
+ case rabbit_json:try_decode(Defn) of
+ {ok, Term} ->
+ R = set0(Type, VHost, Name,
+ [{<<"pattern">>, Pattern},
+ {<<"definition">>, maps:to_list(Term)},
+ {<<"priority">>, Priority},
+ {<<"apply-to">>, ApplyTo}],
+ ActingUser),
+ rabbit_log:info("Successfully set policy '~s' matching ~s names in virtual host '~s' using pattern '~s'",
+ [Name, ApplyTo, VHost, Pattern]),
+ R;
+ {error, Reason} ->
+ {error_string,
+ rabbit_misc:format("JSON decoding error. Reason: ~ts", [Reason])}
+ end.
+
+set_op(VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ set(<<"operator_policy">>, VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser).
+
+set(VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ set(<<"policy">>, VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser).
+
+set(Type, VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ PolicyProps = [{<<"pattern">>, Pattern},
+ {<<"definition">>, Definition},
+ {<<"priority">>, case Priority of
+ undefined -> 0;
+ _ -> Priority
+ end},
+ {<<"apply-to">>, case ApplyTo of
+ undefined -> <<"all">>;
+ _ -> ApplyTo
+ end}],
+ set0(Type, VHost, Name, PolicyProps, ActingUser).
+
+set0(Type, VHost, Name, Term, ActingUser) ->
+ rabbit_runtime_parameters:set_any(VHost, Type, Name, Term, ActingUser).
+
+delete_op(VHost, Name, ActingUser) ->
+ rabbit_runtime_parameters:clear_any(VHost, <<"operator_policy">>, Name, ActingUser).
+
+delete(VHost, Name, ActingUser) ->
+ rabbit_runtime_parameters:clear_any(VHost, <<"policy">>, Name, ActingUser).
+
+lookup_op(VHost, Name) ->
+ case rabbit_runtime_parameters:lookup(VHost, <<"operator_policy">>, Name) of
+ not_found -> not_found;
+ P -> p(P, fun ident/1)
+ end.
+
+lookup(VHost, Name) ->
+ case rabbit_runtime_parameters:lookup(VHost, <<"policy">>, Name) of
+ not_found -> not_found;
+ P -> p(P, fun ident/1)
+ end.
+
+list_op() ->
+ list_op('_').
+
+list_op(VHost) ->
+ list0_op(VHost, fun ident/1).
+
+list_formatted_op(VHost) ->
+ order_policies(list0_op(VHost, fun rabbit_json:encode/1)).
+
+list_formatted_op(VHost, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(AggregatorPid, Ref,
+ fun(P) -> P end, list_formatted_op(VHost)).
+
+list0_op(VHost, DefnFun) ->
+ [p(P, DefnFun)
+ || P <- rabbit_runtime_parameters:list(VHost, <<"operator_policy">>)].
+
+
+list() ->
+ list('_').
+
+list(VHost) ->
+ list0(VHost, fun ident/1).
+
+list_formatted(VHost) ->
+ order_policies(list0(VHost, fun rabbit_json:encode/1)).
+
+list_formatted(VHost, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(AggregatorPid, Ref,
+ fun(P) -> P end, list_formatted(VHost)).
+
+list0(VHost, DefnFun) ->
+ [p(P, DefnFun) || P <- rabbit_runtime_parameters:list(VHost, <<"policy">>)].
+
+order_policies(PropList) ->
+ lists:sort(fun (A, B) -> not sort_pred(A, B) end, PropList).
+
+p(Parameter, DefnFun) ->
+ Value = pget(value, Parameter),
+ [{vhost, pget(vhost, Parameter)},
+ {name, pget(name, Parameter)},
+ {pattern, pget(<<"pattern">>, Value)},
+ {'apply-to', pget(<<"apply-to">>, Value)},
+ {definition, DefnFun(pget(<<"definition">>, Value))},
+ {priority, pget(<<"priority">>, Value)}].
+
+ident(X) -> X.
+
+info_keys() -> [vhost, name, 'apply-to', pattern, definition, priority].
+
+%%----------------------------------------------------------------------------
+
+validate(_VHost, <<"policy">>, Name, Term, _User) ->
+ rabbit_parameter_validation:proplist(
+ Name, policy_validation(), Term);
+validate(_VHost, <<"operator_policy">>, Name, Term, _User) ->
+ rabbit_parameter_validation:proplist(
+ Name, operator_policy_validation(), Term).
+
+notify(VHost, <<"policy">>, Name, Term, ActingUser) ->
+ rabbit_event:notify(policy_set, [{name, Name}, {vhost, VHost},
+ {user_who_performed_action, ActingUser} | Term]),
+ update_policies(VHost);
+notify(VHost, <<"operator_policy">>, Name, Term, ActingUser) ->
+ rabbit_event:notify(policy_set, [{name, Name}, {vhost, VHost},
+ {user_who_performed_action, ActingUser} | Term]),
+ update_policies(VHost).
+
+notify_clear(VHost, <<"policy">>, Name, ActingUser) ->
+ rabbit_event:notify(policy_cleared, [{name, Name}, {vhost, VHost},
+ {user_who_performed_action, ActingUser}]),
+ update_policies(VHost);
+notify_clear(VHost, <<"operator_policy">>, Name, ActingUser) ->
+ rabbit_event:notify(operator_policy_cleared,
+ [{name, Name}, {vhost, VHost},
+ {user_who_performed_action, ActingUser}]),
+ update_policies(VHost).
+
+%%----------------------------------------------------------------------------
+
+%% [1] We need to prevent this from becoming O(n^2) in a similar
+%% manner to rabbit_binding:remove_for_{source,destination}. So see
+%% the comment in rabbit_binding:lock_route_tables/0 for more rationale.
+%% [2] We could be here in a post-tx fun after the vhost has been
+%% deleted; in which case it's fine to do nothing.
+update_policies(VHost) ->
+ Tabs = [rabbit_queue, rabbit_durable_queue,
+ rabbit_exchange, rabbit_durable_exchange],
+ {Xs, Qs} = rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ [mnesia:lock({table, T}, write) || T <- Tabs], %% [1]
+ case catch {list(VHost), list_op(VHost)} of
+ {'EXIT', {throw, {error, {no_such_vhost, _}}}} ->
+ {[], []}; %% [2]
+ {'EXIT', Exit} ->
+ exit(Exit);
+ {Policies, OpPolicies} ->
+ {[update_exchange(X, Policies, OpPolicies) ||
+ X <- rabbit_exchange:list(VHost)],
+ [update_queue(Q, Policies, OpPolicies) ||
+ Q <- rabbit_amqqueue:list(VHost)]}
+ end
+ end),
+ [catch notify(X) || X <- Xs],
+ [catch notify(Q) || Q <- Qs],
+ ok.
+
+update_exchange(X = #exchange{name = XName,
+ policy = OldPolicy,
+ operator_policy = OldOpPolicy},
+ Policies, OpPolicies) ->
+ case {match(XName, Policies), match(XName, OpPolicies)} of
+ {OldPolicy, OldOpPolicy} -> no_change;
+ {NewPolicy, NewOpPolicy} ->
+ NewExchange = rabbit_exchange:update(
+ XName,
+ fun(X0) ->
+ rabbit_exchange_decorator:set(
+ X0 #exchange{policy = NewPolicy,
+ operator_policy = NewOpPolicy})
+ end),
+ case NewExchange of
+ #exchange{} = X1 -> {X, X1};
+ not_found -> {X, X }
+ end
+ end.
+
+update_queue(Q0, Policies, OpPolicies) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ OldPolicy = amqqueue:get_policy(Q0),
+ OldOpPolicy = amqqueue:get_operator_policy(Q0),
+ case {match(QName, Policies), match(QName, OpPolicies)} of
+ {OldPolicy, OldOpPolicy} -> no_change;
+ {NewPolicy, NewOpPolicy} ->
+ F = fun (QFun0) ->
+ QFun1 = amqqueue:set_policy(QFun0, NewPolicy),
+ QFun2 = amqqueue:set_operator_policy(QFun1, NewOpPolicy),
+ NewPolicyVersion = amqqueue:get_policy_version(QFun2) + 1,
+ QFun3 = amqqueue:set_policy_version(QFun2, NewPolicyVersion),
+ rabbit_queue_decorator:set(QFun3)
+ end,
+ NewQueue = rabbit_amqqueue:update(QName, F),
+ case NewQueue of
+ Q1 when ?is_amqqueue(Q1) ->
+ {Q0, Q1};
+ not_found ->
+ {Q0, Q0}
+ end
+ end.
+
+notify(no_change)->
+ ok;
+notify({X1 = #exchange{}, X2 = #exchange{}}) ->
+ rabbit_exchange:policy_changed(X1, X2);
+notify({Q1, Q2}) when ?is_amqqueue(Q1), ?is_amqqueue(Q2) ->
+ rabbit_amqqueue:policy_changed(Q1, Q2).
+
+match(Name, Policies) ->
+ case match_all(Name, Policies) of
+ [] -> undefined;
+ [Policy | _] -> Policy
+ end.
+
+match_all(Name, Policies) ->
+ lists:sort(fun sort_pred/2, [P || P <- Policies, matches(Name, P)]).
+
+matches(#resource{name = Name, kind = Kind, virtual_host = VHost} = Resource, Policy) ->
+ matches_type(Kind, pget('apply-to', Policy)) andalso
+ is_applicable(Resource, pget(definition, Policy)) andalso
+ match =:= re:run(Name, pget(pattern, Policy), [{capture, none}]) andalso
+ VHost =:= pget(vhost, Policy).
+
+matches_type(exchange, <<"exchanges">>) -> true;
+matches_type(queue, <<"queues">>) -> true;
+matches_type(exchange, <<"all">>) -> true;
+matches_type(queue, <<"all">>) -> true;
+matches_type(_, _) -> false.
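+%% For illustration (hypothetical policy): a policy with pattern <<"^ha\\.">>,
+%% 'apply-to' <<"queues">> and vhost <<"/">> matches a queue named
+%% <<"ha.orders">> in vhost <<"/">>, provided its definition is applicable to
+%% the queue, but never matches exchanges.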
+
+sort_pred(A, B) -> pget(priority, A) >= pget(priority, B).
+
+is_applicable(#resource{kind = queue} = Resource, Policy) ->
+ rabbit_amqqueue:is_policy_applicable(Resource, to_list(Policy));
+is_applicable(_, _) ->
+ true.
+
+to_list(L) when is_list(L) ->
+ L;
+to_list(M) when is_map(M) ->
+ maps:to_list(M).
+
+%%----------------------------------------------------------------------------
+
+operator_policy_validation() ->
+ [{<<"priority">>, fun rabbit_parameter_validation:number/2, mandatory},
+ {<<"pattern">>, fun rabbit_parameter_validation:regex/2, mandatory},
+ {<<"apply-to">>, fun apply_to_validation/2, optional},
+ {<<"definition">>, fun validation_op/2, mandatory}].
+
+policy_validation() ->
+ [{<<"priority">>, fun rabbit_parameter_validation:number/2, mandatory},
+ {<<"pattern">>, fun rabbit_parameter_validation:regex/2, mandatory},
+ {<<"apply-to">>, fun apply_to_validation/2, optional},
+ {<<"definition">>, fun validation/2, mandatory}].
+
+validation_op(Name, Terms) ->
+ validation(Name, Terms, operator_policy_validator).
+
+validation(Name, Terms) ->
+ validation(Name, Terms, policy_validator).
+
+validation(_Name, [], _Validator) ->
+ {error, "no policy provided", []};
+validation(Name, Terms0, Validator) when is_map(Terms0) ->
+ Terms = maps:to_list(Terms0),
+ validation(Name, Terms, Validator);
+validation(_Name, Terms, Validator) when is_list(Terms) ->
+ {Keys, Modules} = lists:unzip(
+ rabbit_registry:lookup_all(Validator)),
+ [] = dups(Keys), %% ASSERTION
+ Validators = lists:zipwith(fun (M, K) -> {M, a2b(K)} end, Modules, Keys),
+ case is_proplist(Terms) of
+ true -> {TermKeys, _} = lists:unzip(Terms),
+ case dups(TermKeys) of
+ [] -> validation0(Validators, Terms);
+ Dup -> {error, "~p duplicate keys not allowed", [Dup]}
+ end;
+ false -> {error, "definition must be a dictionary: ~p", [Terms]}
+ end;
+validation(Name, Term, Validator) ->
+ {error, "parse error while reading policy ~s: ~p. Validator: ~p.",
+ [Name, Term, Validator]}.
+
+validation0(Validators, Terms) ->
+ case lists:foldl(
+ fun (Mod, {ok, TermsLeft}) ->
+ ModKeys = proplists:get_all_values(Mod, Validators),
+ case [T || {Key, _} = T <- TermsLeft,
+ lists:member(Key, ModKeys)] of
+ [] -> {ok, TermsLeft};
+ Scope -> {Mod:validate_policy(Scope), TermsLeft -- Scope}
+ end;
+ (_, Acc) ->
+ Acc
+ end, {ok, Terms}, proplists:get_keys(Validators)) of
+ {ok, []} ->
+ ok;
+ {ok, Unvalidated} ->
+ {error, "~p are not recognised policy settings", [Unvalidated]};
+ {Error, _} ->
+ Error
+ end.
+
+a2b(A) -> list_to_binary(atom_to_list(A)).
+
+dups(L) -> L -- lists:usort(L).
+
+is_proplist(L) -> length(L) =:= length([I || I = {_, _} <- L]).
+
+apply_to_validation(_Name, <<"all">>) -> ok;
+apply_to_validation(_Name, <<"exchanges">>) -> ok;
+apply_to_validation(_Name, <<"queues">>) -> ok;
+apply_to_validation(_Name, Term) ->
+ {error, "apply-to '~s' unrecognised; should be 'queues', 'exchanges' "
+ "or 'all'", [Term]}.
diff --git a/deps/rabbit/src/rabbit_policy_merge_strategy.erl b/deps/rabbit/src/rabbit_policy_merge_strategy.erl
new file mode 100644
index 0000000000..f2b79e5862
--- /dev/null
+++ b/deps/rabbit/src/rabbit_policy_merge_strategy.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_policy_merge_strategy).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-callback merge_policy_value(binary(), Value, Value) ->
+ Value
+ when Value :: term().
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit/src/rabbit_prelaunch_cluster.erl b/deps/rabbit/src/rabbit_prelaunch_cluster.erl
new file mode 100644
index 0000000000..9d3cda99e3
--- /dev/null
+++ b/deps/rabbit/src/rabbit_prelaunch_cluster.erl
@@ -0,0 +1,22 @@
+-module(rabbit_prelaunch_cluster).
+
+-export([setup/1]).
+
+setup(Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Clustering =="),
+ rabbit_log_prelaunch:debug("Preparing cluster status files"),
+ rabbit_node_monitor:prepare_cluster_status_files(),
+ case Context of
+ #{initial_pass := true} ->
+ rabbit_log_prelaunch:debug("Upgrading Mnesia schema"),
+ ok = rabbit_upgrade:maybe_upgrade_mnesia();
+ _ ->
+ ok
+ end,
+ %% It's important that the consistency check happens after
+ %% the upgrade, since if we are a secondary node the
+ %% primary node will have forgotten us
+ rabbit_log_prelaunch:debug("Checking cluster consistency"),
+ rabbit_mnesia:check_cluster_consistency(),
+ ok.
diff --git a/deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl b/deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl
new file mode 100644
index 0000000000..57fe32f8e6
--- /dev/null
+++ b/deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl
@@ -0,0 +1,53 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prelaunch_enabled_plugins_file).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([setup/1]).
+
+setup(Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Enabled plugins file =="),
+ update_enabled_plugins_file(Context).
+
+%% -------------------------------------------------------------------
+%% `enabled_plugins` file content initialization.
+%% -------------------------------------------------------------------
+
+update_enabled_plugins_file(#{enabled_plugins := undefined}) ->
+ ok;
+update_enabled_plugins_file(#{enabled_plugins := all,
+ plugins_path := Path} = Context) ->
+ List = [P#plugin.name || P <- rabbit_plugins:list(Path)],
+ do_update_enabled_plugins_file(Context, List);
+update_enabled_plugins_file(#{enabled_plugins := List} = Context) ->
+ do_update_enabled_plugins_file(Context, List).
+
+do_update_enabled_plugins_file(#{enabled_plugins_file := File}, List) ->
+ SortedList = lists:usort(List),
+ case SortedList of
+ [] ->
+ rabbit_log_prelaunch:debug("Marking all plugins as disabled");
+ _ ->
+ rabbit_log_prelaunch:debug(
+ "Marking the following plugins as enabled:"),
+ [rabbit_log_prelaunch:debug(" - ~s", [P]) || P <- SortedList]
+ end,
+ Content = io_lib:format("~p.~n", [SortedList]),
+ case file:write_file(File, Content) of
+ ok ->
+ rabbit_log_prelaunch:debug("Wrote plugins file: ~ts", [File]),
+ ok;
+ {error, Reason} ->
+ rabbit_log_prelaunch:error(
+ "Failed to update enabled plugins file \"~ts\" "
+ "from $RABBITMQ_ENABLED_PLUGINS: ~ts",
+ [File, file:format_error(Reason)]),
+ throw({error, failed_to_update_enabled_plugins_file})
+ end.
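+
+%% For illustration: with List = [rabbitmq_management, rabbitmq_shovel] the
+%% file ends up containing the single Erlang term
+%%   [rabbitmq_management,rabbitmq_shovel].
+%% followed by a newline, as produced by io_lib:format("~p.~n", ...) above.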
diff --git a/deps/rabbit/src/rabbit_prelaunch_feature_flags.erl b/deps/rabbit/src/rabbit_prelaunch_feature_flags.erl
new file mode 100644
index 0000000000..cd7b276f4c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_prelaunch_feature_flags.erl
@@ -0,0 +1,32 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prelaunch_feature_flags).
+
+-export([setup/1]).
+
+setup(#{feature_flags_file := FFFile}) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Feature flags =="),
+ case filelib:ensure_dir(FFFile) of
+ ok ->
+ rabbit_log_prelaunch:debug("Initializing feature flags registry"),
+ case rabbit_feature_flags:initialize_registry() of
+ ok ->
+ ok;
+ {error, Reason} ->
+ rabbit_log_prelaunch:error(
+ "Failed to initialize feature flags registry: ~p",
+ [Reason]),
+ throw({error, failed_to_initialize_feature_flags_registry})
+ end;
+ {error, Reason} ->
+ rabbit_log_prelaunch:error(
+ "Failed to create feature flags file \"~ts\" directory: ~ts",
+ [FFFile, file:format_error(Reason)]),
+ throw({error, failed_to_create_feature_flags_file_directory})
+ end.
diff --git a/deps/rabbit/src/rabbit_prelaunch_logging.erl b/deps/rabbit/src/rabbit_prelaunch_logging.erl
new file mode 100644
index 0000000000..6e3f040ec5
--- /dev/null
+++ b/deps/rabbit/src/rabbit_prelaunch_logging.erl
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prelaunch_logging).
+
+-export([setup/1]).
+
+setup(Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Logging =="),
+ ok = set_ERL_CRASH_DUMP_envvar(Context),
+ ok = configure_lager(Context).
+
+set_ERL_CRASH_DUMP_envvar(#{log_base_dir := LogBaseDir}) ->
+ case os:getenv("ERL_CRASH_DUMP") of
+ false ->
+ ErlCrashDump = filename:join(LogBaseDir, "erl_crash.dump"),
+ rabbit_log_prelaunch:debug(
+ "Setting $ERL_CRASH_DUMP environment variable to \"~ts\"",
+ [ErlCrashDump]),
+ os:putenv("ERL_CRASH_DUMP", ErlCrashDump),
+ ok;
+ ErlCrashDump ->
+ rabbit_log_prelaunch:debug(
+ "$ERL_CRASH_DUMP environment variable already set to \"~ts\"",
+ [ErlCrashDump]),
+ ok
+ end.
+
+configure_lager(#{log_base_dir := LogBaseDir,
+ main_log_file := MainLog,
+ upgrade_log_file := UpgradeLog} = Context) ->
+ {SaslErrorLogger,
+ MainLagerHandler,
+ UpgradeLagerHandler} = case MainLog of
+ "-" ->
+ %% Log to STDOUT.
+ rabbit_log_prelaunch:debug(
+ "Logging to stdout"),
+ {tty,
+ tty,
+ tty};
+ _ ->
+ rabbit_log_prelaunch:debug(
+ "Logging to:"),
+ [rabbit_log_prelaunch:debug(
+ " - ~ts", [Log])
+ || Log <- [MainLog, UpgradeLog]],
+ %% Log to file.
+ {false,
+ MainLog,
+ UpgradeLog}
+ end,
+
+ ok = application:set_env(lager, crash_log, "log/crash.log"),
+
+ Fun = fun({App, Var, Value}) ->
+ case application:get_env(App, Var) of
+ undefined -> ok = application:set_env(App, Var, Value);
+ _ -> ok
+ end
+ end,
+ Vars = [{sasl, sasl_error_logger, SaslErrorLogger},
+ {rabbit, lager_log_root, LogBaseDir},
+ {rabbit, lager_default_file, MainLagerHandler},
+ {rabbit, lager_upgrade_file, UpgradeLagerHandler}],
+ lists:foreach(Fun, Vars),
+
+ ok = rabbit_lager:start_logger(),
+
+ ok = rabbit_prelaunch_early_logging:setup_early_logging(Context, false).
diff --git a/deps/rabbit/src/rabbit_prequeue.erl b/deps/rabbit/src/rabbit_prequeue.erl
new file mode 100644
index 0000000000..b5af8927c7
--- /dev/null
+++ b/deps/rabbit/src/rabbit_prequeue.erl
@@ -0,0 +1,100 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prequeue).
+
+%% This is the initial gen_server that all queue processes start off
+%% as. It handles the decision as to whether we need to start a new
+%% mirror, a new master/unmirrored, or whether we are restarting (and
+%% if so, as what). Thus a crashing queue process can restart from here
+%% and always do the right thing.
+
+-export([start_link/3]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-behaviour(gen_server2).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+%%----------------------------------------------------------------------------
+
+-export_type([start_mode/0]).
+
+-type start_mode() :: 'declare' | 'recovery' | 'slave'.
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(amqqueue:amqqueue(), start_mode(), pid())
+ -> rabbit_types:ok_pid_or_error().
+
+start_link(Q, StartMode, Marker) ->
+ gen_server2:start_link(?MODULE, {Q, StartMode, Marker}, []).
+
+%%----------------------------------------------------------------------------
+
+init({Q, StartMode, Marker}) ->
+ init(Q, case {is_process_alive(Marker), StartMode} of
+ {true, slave} -> slave;
+ {true, _} -> master;
+ {false, _} -> restart
+ end).
+
+init(Q, master) -> rabbit_amqqueue_process:init(Q);
+init(Q, slave) -> rabbit_mirror_queue_slave:init(Q);
+
+init(Q0, restart) when ?is_amqqueue(Q0) ->
+ QueueName = amqqueue:get_name(Q0),
+ {ok, Q1} = rabbit_amqqueue:lookup(QueueName),
+ QPid = amqqueue:get_pid(Q1),
+ SPids = amqqueue:get_slave_pids(Q1),
+ LocalOrMasterDown = node(QPid) =:= node()
+ orelse not rabbit_mnesia:on_running_node(QPid),
+ Slaves = [SPid || SPid <- SPids, rabbit_mnesia:is_process_alive(SPid)],
+ case rabbit_mnesia:is_process_alive(QPid) of
+ true -> false = LocalOrMasterDown, %% assertion
+ rabbit_mirror_queue_slave:go(self(), async),
+ rabbit_mirror_queue_slave:init(Q1); %% [1]
+ false -> case LocalOrMasterDown andalso Slaves =:= [] of
+ true -> crash_restart(Q1); %% [2]
+ false -> timer:sleep(25),
+ init(Q1, restart) %% [3]
+ end
+ end.
+%% [1] There is a master on another node. Regardless of whether we
+%% were originally a master or a mirror, we are now a new slave.
+%%
+%% [2] Nothing is alive. We are the last best hope. Try to restart as a master.
+%%
+%% [3] The current master is dead but either there are alive mirrors to
+%% take over or it's all happening on a different node anyway. This is
+%% not a stable situation. Sleep and wait for somebody else to make a
+%% move.
+
+crash_restart(Q0) when ?is_amqqueue(Q0) ->
+ QueueName = amqqueue:get_name(Q0),
+ rabbit_log:error("Restarting crashed ~s.~n", [rabbit_misc:rs(QueueName)]),
+ gen_server2:cast(self(), init),
+ Q1 = amqqueue:set_pid(Q0, self()),
+ rabbit_amqqueue_process:init(Q1).
+
+%%----------------------------------------------------------------------------
+
+%% This gen_server2 always hands over to some other module at the end
+%% of init/1.
+-spec handle_call(_, _, _) -> no_return().
+handle_call(_Msg, _From, _State) -> exit(unreachable).
+-spec handle_cast(_, _) -> no_return().
+handle_cast(_Msg, _State) -> exit(unreachable).
+-spec handle_info(_, _) -> no_return().
+handle_info(_Msg, _State) -> exit(unreachable).
+-spec terminate(_, _) -> no_return().
+terminate(_Reason, _State) -> exit(unreachable).
+-spec code_change(_, _, _) -> no_return().
+code_change(_OldVsn, _State, _Extra) -> exit(unreachable).
diff --git a/deps/rabbit/src/rabbit_priority_queue.erl b/deps/rabbit/src/rabbit_priority_queue.erl
new file mode 100644
index 0000000000..4b41b8dfbd
--- /dev/null
+++ b/deps/rabbit/src/rabbit_priority_queue.erl
@@ -0,0 +1,688 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2015-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_priority_queue).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("amqqueue.hrl").
+
+-behaviour(rabbit_backing_queue).
+
+%% Priority queue support is enabled unconditionally. Disabling priority
+%% queuing after it has been enabled is dangerous.

+-rabbit_boot_step({?MODULE,
+ [{description, "enable priority queue"},
+ {mfa, {?MODULE, enable, []}},
+ {requires, pre_boot},
+ {enables, kernel_ready}]}).
+
+-export([enable/0]).
+
+-export([start/2, stop/1]).
+
+-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
+ purge/1, purge_acks/1,
+ publish/6, publish_delivered/5, discard/4, drain_confirmed/1,
+ batch_publish/4, batch_publish_delivered/4,
+ dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
+ ackfold/4, fold/3, len/1, is_empty/1, depth/1,
+ set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
+ handle_pre_hibernate/1, resume/1, msg_rates/1,
+ info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ zip_msgs_and_acks/4, handle_info/2]).
+
+-record(state, {bq, bqss, max_priority}).
+-record(passthrough, {bq, bqs}).
+
+%% See 'note on suffixes' below
+-define(passthrough1(F), State#passthrough{bqs = BQ:F}).
+-define(passthrough2(F),
+ {Res, BQS1} = BQ:F, {Res, State#passthrough{bqs = BQS1}}).
+-define(passthrough3(F),
+ {Res1, Res2, BQS1} = BQ:F, {Res1, Res2, State#passthrough{bqs = BQS1}}).
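+%% e.g. ?passthrough2(purge(BQS)) expands, in a clause that binds BQ, BQS
+%% and State, to:
+%%   {Res, BQS1} = BQ:purge(BQS), {Res, State#passthrough{bqs = BQS1}}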
+
+%% This module adds support for priority queues.
+%%
+%% Priority queues have one backing queue per priority. Backing queue
+%% functions are applied to each per-priority BQ in turn and their results
+%% are folded together, with the sub-BQs kept in priority order.
+%%
+%% For queues that do not have priorities enabled, the functions in this
+%% module delegate to their "regular" backing queue module counterparts.
+%% See the `passthrough` record and passthrough{1,2,3} macros.
+%%
+%% Delivery to consumers happens by first "running" the queue with
+%% the highest priority until there are no more messages to deliver,
+%% then the next one, and so on. This offers good prioritisation
+%% but may result in lower priority messages not being delivered
+%% when there's a high ingress rate of messages with higher priority.
+
+enable() ->
+ {ok, RealBQ} = application:get_env(rabbit, backing_queue_module),
+ case RealBQ of
+ ?MODULE -> ok;
+ _ -> rabbit_log:info("Priority queues enabled, real BQ is ~s~n",
+ [RealBQ]),
+ application:set_env(
+ rabbitmq_priority_queue, backing_queue_module, RealBQ),
+ application:set_env(rabbit, backing_queue_module, ?MODULE)
+ end.
+
+%%----------------------------------------------------------------------------
+
+start(VHost, QNames) ->
+ BQ = bq(),
+ %% TODO this expand-collapse dance is a bit ridiculous but it's what
+ %% rabbit_amqqueue:recover/0 expects. We could probably simplify
+ %% this if we rejigged recovery a bit.
+ {DupNames, ExpNames} = expand_queues(QNames),
+ case BQ:start(VHost, ExpNames) of
+ {ok, ExpRecovery} ->
+ {ok, collapse_recovery(QNames, DupNames, ExpRecovery)};
+ Else ->
+ Else
+ end.
+
+stop(VHost) ->
+ BQ = bq(),
+ BQ:stop(VHost).
+
+%%----------------------------------------------------------------------------
+
+mutate_name(P, Q) when ?is_amqqueue(Q) ->
+ Res0 = #resource{name = QNameBin0} = amqqueue:get_name(Q),
+ QNameBin1 = mutate_name_bin(P, QNameBin0),
+ Res1 = Res0#resource{name = QNameBin1},
+ amqqueue:set_name(Q, Res1).
+
+mutate_name_bin(P, NameBin) ->
+ <<NameBin/binary, 0, P:8>>.
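+%% e.g. priority 3 applied to the name <<"orders">> yields
+%% <<"orders", 0, 3>> - the original name followed by a 0 byte and the
+%% priority encoded as a single byte.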
+
+expand_queues(QNames) ->
+ lists:unzip(
+ lists:append([expand_queue(QName) || QName <- QNames])).
+
+expand_queue(QName = #resource{name = QNameBin}) ->
+ {ok, Q} = rabbit_misc:dirty_read({rabbit_durable_queue, QName}),
+ case priorities(Q) of
+ none -> [{QName, QName}];
+ Ps -> [{QName, QName#resource{name = mutate_name_bin(P, QNameBin)}}
+ || P <- Ps]
+ end.
+
+collapse_recovery(QNames, DupNames, Recovery) ->
+ NameToTerms = lists:foldl(fun({Name, RecTerm}, Dict) ->
+ dict:append(Name, RecTerm, Dict)
+ end, dict:new(), lists:zip(DupNames, Recovery)),
+ [dict:fetch(Name, NameToTerms) || Name <- QNames].
+
+priorities(Q) when ?is_amqqueue(Q) ->
+ Args = amqqueue:get_arguments(Q),
+ Ints = [long, short, signedint, byte, unsignedbyte, unsignedshort, unsignedint],
+ case rabbit_misc:table_lookup(Args, <<"x-max-priority">>) of
+ {Type, RequestedMax} ->
+ case lists:member(Type, Ints) of
+ false -> none;
+ true ->
+ Max = min(RequestedMax, ?MAX_SUPPORTED_PRIORITY),
+ lists:reverse(lists:seq(0, Max))
+ end;
+ _ -> none
+ end.
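+%% e.g. declaring a queue with the argument {<<"x-max-priority">>, long, 10}
+%% makes priorities/1 return [10, 9, ..., 1, 0] (clamped to
+%% ?MAX_SUPPORTED_PRIORITY), i.e. one sub-BQ per priority level, highest
+%% first.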
+
+%%----------------------------------------------------------------------------
+
+init(Q, Recover, AsyncCallback) ->
+ BQ = bq(),
+ case priorities(Q) of
+ none -> RealRecover = case Recover of
+ [R] -> R; %% [0]
+ R -> R
+ end,
+ #passthrough{bq = BQ,
+ bqs = BQ:init(Q, RealRecover, AsyncCallback)};
+ Ps -> Init = fun (P, Term) ->
+ BQ:init(
+ mutate_name(P, Q), Term,
+ fun (M, F) -> AsyncCallback(M, {P, F}) end)
+ end,
+ BQSs = case have_recovery_terms(Recover) of
+ false -> [{P, Init(P, Recover)} || P <- Ps];
+ _ -> PsTerms = lists:zip(Ps, Recover),
+ [{P, Init(P, Term)} || {P, Term} <- PsTerms]
+ end,
+ #state{bq = BQ,
+ bqss = BQSs,
+ max_priority = hd(Ps)}
+ end.
+%% [0] collapse_recovery has the effect of making a list of recovery
+%% terms in priority order, even for non priority queues. It's easier
+%% to do that and "unwrap" in init/3 than to have collapse_recovery be
+%% aware of non-priority queues.
+
+have_recovery_terms(new) -> false;
+have_recovery_terms(non_clean_shutdown) -> false;
+have_recovery_terms(_) -> true.
+
+terminate(Reason, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:terminate(Reason, BQSN) end, State);
+terminate(Reason, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(terminate(Reason, BQS)).
+
+delete_and_terminate(Reason, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) ->
+ BQ:delete_and_terminate(Reason, BQSN)
+ end, State);
+delete_and_terminate(Reason, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(delete_and_terminate(Reason, BQS)).
+
+delete_crashed(Q) ->
+ BQ = bq(),
+ case priorities(Q) of
+ none -> BQ:delete_crashed(Q);
+ Ps -> [BQ:delete_crashed(mutate_name(P, Q)) || P <- Ps]
+ end.
+
+purge(State = #state{bq = BQ}) ->
+ fold_add2(fun (_P, BQSN) -> BQ:purge(BQSN) end, State);
+purge(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(purge(BQS)).
+
+purge_acks(State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:purge_acks(BQSN) end, State);
+purge_acks(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(purge_acks(BQS)).
+
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State = #state{bq = BQ}) ->
+ pick1(fun (_P, BQSN) ->
+ BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQSN)
+ end, Msg, State);
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS)).
+
+batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) ->
+ PubMap = partition_publish_batch(Publishes, MaxP),
+ lists:foldl(
+ fun ({Priority, Pubs}, St) ->
+ pick1(fun (_P, BQSN) ->
+ BQ:batch_publish(Pubs, ChPid, Flow, BQSN)
+ end, Priority, St)
+ end, State, maps:to_list(PubMap));
+batch_publish(Publishes, ChPid, Flow,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(batch_publish(Publishes, ChPid, Flow, BQS)).
+
+publish_delivered(Msg, MsgProps, ChPid, Flow, State = #state{bq = BQ}) ->
+ pick2(fun (P, BQSN) ->
+ {AckTag, BQSN1} = BQ:publish_delivered(
+ Msg, MsgProps, ChPid, Flow, BQSN),
+ {{P, AckTag}, BQSN1}
+ end, Msg, State);
+publish_delivered(Msg, MsgProps, ChPid, Flow,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(publish_delivered(Msg, MsgProps, ChPid, Flow, BQS)).
+
+batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) ->
+ PubMap = partition_publish_delivered_batch(Publishes, MaxP),
+ {PrioritiesAndAcks, State1} =
+ lists:foldl(
+ fun ({Priority, Pubs}, {PriosAndAcks, St}) ->
+ {PriosAndAcks1, St1} =
+ pick2(fun (P, BQSN) ->
+ {AckTags, BQSN1} =
+ BQ:batch_publish_delivered(
+ Pubs, ChPid, Flow, BQSN),
+ {priority_on_acktags(P, AckTags), BQSN1}
+ end, Priority, St),
+ {[PriosAndAcks1 | PriosAndAcks], St1}
+ end, {[], State}, maps:to_list(PubMap)),
+ {lists:reverse(PrioritiesAndAcks), State1};
+batch_publish_delivered(Publishes, ChPid, Flow,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(batch_publish_delivered(Publishes, ChPid, Flow, BQS)).
+
+%% TODO this is a hack. The BQ api does not give us enough information
+%% here - if we had the Msg we could look at its priority and forward
+%% to the appropriate sub-BQ. But we don't so we are stuck.
+%%
+%% But fortunately VQ ignores discard/4, so we can too, *assuming we
+%% are talking to VQ*. discard/4 is used by HA, but that's "above" us
+%% (if in use) so we don't break that either, just some hypothetical
+%% alternate BQ implementation.
+discard(_MsgId, _ChPid, _Flow, State = #state{}) ->
+ State;
+ %% We should have something a bit like this here:
+ %% pick1(fun (_P, BQSN) ->
+ %% BQ:discard(MsgId, ChPid, Flow, BQSN)
+ %% end, Msg, State);
+discard(MsgId, ChPid, Flow, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(discard(MsgId, ChPid, Flow, BQS)).
+
+drain_confirmed(State = #state{bq = BQ}) ->
+ fold_append2(fun (_P, BQSN) -> BQ:drain_confirmed(BQSN) end, State);
+drain_confirmed(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(drain_confirmed(BQS)).
+
+dropwhile(Pred, State = #state{bq = BQ}) ->
+ find2(fun (_P, BQSN) -> BQ:dropwhile(Pred, BQSN) end, undefined, State);
+dropwhile(Pred, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(dropwhile(Pred, BQS)).
+
+%% TODO this is a bit nasty. In the one place where fetchwhile/4 is
+%% actually used the accumulator is a list of acktags, which of course
+%% we need to mutate - so we do that although we are encoding an
+%% assumption here.
+fetchwhile(Pred, Fun, Acc, State = #state{bq = BQ}) ->
+ findfold3(
+ fun (P, BQSN, AccN) ->
+ {Res, AccN1, BQSN1} = BQ:fetchwhile(Pred, Fun, AccN, BQSN),
+ {Res, priority_on_acktags(P, AccN1), BQSN1}
+ end, Acc, undefined, State);
+fetchwhile(Pred, Fun, Acc, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough3(fetchwhile(Pred, Fun, Acc, BQS)).
+
+fetch(AckRequired, State = #state{bq = BQ}) ->
+ find2(
+ fun (P, BQSN) ->
+ case BQ:fetch(AckRequired, BQSN) of
+ {empty, BQSN1} -> {empty, BQSN1};
+ {{Msg, Del, ATag}, BQSN1} -> {{Msg, Del, {P, ATag}}, BQSN1}
+ end
+ end, empty, State);
+fetch(AckRequired, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(fetch(AckRequired, BQS)).
+
+drop(AckRequired, State = #state{bq = BQ}) ->
+ find2(fun (P, BQSN) ->
+ case BQ:drop(AckRequired, BQSN) of
+ {empty, BQSN1} -> {empty, BQSN1};
+ {{MsgId, AckTag}, BQSN1} -> {{MsgId, {P, AckTag}}, BQSN1}
+ end
+ end, empty, State);
+drop(AckRequired, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(drop(AckRequired, BQS)).
+
+ack(AckTags, State = #state{bq = BQ}) ->
+ fold_by_acktags2(fun (AckTagsN, BQSN) ->
+ BQ:ack(AckTagsN, BQSN)
+ end, AckTags, State);
+ack(AckTags, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(ack(AckTags, BQS)).
+
+requeue(AckTags, State = #state{bq = BQ}) ->
+ fold_by_acktags2(fun (AckTagsN, BQSN) ->
+ BQ:requeue(AckTagsN, BQSN)
+ end, AckTags, State);
+requeue(AckTags, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(requeue(AckTags, BQS)).
+
+%% Similar problem to fetchwhile/4
+ackfold(MsgFun, Acc, State = #state{bq = BQ}, AckTags) ->
+ AckTagsByPriority = partition_acktags(AckTags),
+ fold2(
+ fun (P, BQSN, AccN) ->
+ case maps:find(P, AckTagsByPriority) of
+ {ok, ATagsN} -> {AccN1, BQSN1} =
+ BQ:ackfold(MsgFun, AccN, BQSN, ATagsN),
+ {priority_on_acktags(P, AccN1), BQSN1};
+ error -> {AccN, BQSN}
+ end
+ end, Acc, State);
+ackfold(MsgFun, Acc, State = #passthrough{bq = BQ, bqs = BQS}, AckTags) ->
+ ?passthrough2(ackfold(MsgFun, Acc, BQS, AckTags)).
+
+fold(Fun, Acc, State = #state{bq = BQ}) ->
+ fold2(fun (_P, BQSN, AccN) -> BQ:fold(Fun, AccN, BQSN) end, Acc, State);
+fold(Fun, Acc, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(fold(Fun, Acc, BQS)).
+
+len(#state{bq = BQ, bqss = BQSs}) ->
+ add0(fun (_P, BQSN) -> BQ:len(BQSN) end, BQSs);
+len(#passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:len(BQS).
+
+is_empty(#state{bq = BQ, bqss = BQSs}) ->
+ all0(fun (_P, BQSN) -> BQ:is_empty(BQSN) end, BQSs);
+is_empty(#passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:is_empty(BQS).
+
+depth(#state{bq = BQ, bqss = BQSs}) ->
+ add0(fun (_P, BQSN) -> BQ:depth(BQSN) end, BQSs);
+depth(#passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:depth(BQS).
+
+set_ram_duration_target(DurationTarget, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) ->
+ BQ:set_ram_duration_target(DurationTarget, BQSN)
+ end, State);
+set_ram_duration_target(DurationTarget,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(set_ram_duration_target(DurationTarget, BQS)).
+
+ram_duration(State = #state{bq = BQ}) ->
+ fold_min2(fun (_P, BQSN) -> BQ:ram_duration(BQSN) end, State);
+ram_duration(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(ram_duration(BQS)).
+
+needs_timeout(#state{bq = BQ, bqss = BQSs}) ->
+ fold0(fun (_P, _BQSN, timed) -> timed;
+ (_P, BQSN, idle) -> case BQ:needs_timeout(BQSN) of
+ timed -> timed;
+ _ -> idle
+ end;
+ (_P, BQSN, false) -> BQ:needs_timeout(BQSN)
+ end, false, BQSs);
+needs_timeout(#passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:needs_timeout(BQS).
+
+timeout(State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:timeout(BQSN) end, State);
+timeout(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(timeout(BQS)).
+
+handle_pre_hibernate(State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) ->
+ BQ:handle_pre_hibernate(BQSN)
+ end, State);
+handle_pre_hibernate(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(handle_pre_hibernate(BQS)).
+
+handle_info(Msg, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:handle_info(Msg, BQSN) end, State);
+handle_info(Msg, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(handle_info(Msg, BQS)).
+
+resume(State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:resume(BQSN) end, State);
+resume(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(resume(BQS)).
+
+msg_rates(#state{bq = BQ, bqss = BQSs}) ->
+ fold0(fun(_P, BQSN, {InN, OutN}) ->
+ {In, Out} = BQ:msg_rates(BQSN),
+ {InN + In, OutN + Out}
+ end, {0.0, 0.0}, BQSs);
+msg_rates(#passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:msg_rates(BQS).
+
+info(backing_queue_status, #state{bq = BQ, bqss = BQSs}) ->
+ fold0(fun (P, BQSN, Acc) ->
+ combine_status(P, BQ:info(backing_queue_status, BQSN), Acc)
+ end, nothing, BQSs);
+info(head_message_timestamp, #state{bq = BQ, bqss = BQSs}) ->
+ find_head_message_timestamp(BQ, BQSs, '');
+info(Item, #state{bq = BQ, bqss = BQSs}) ->
+ fold0(fun (_P, BQSN, Acc) ->
+ Acc + BQ:info(Item, BQSN)
+ end, 0, BQSs);
+info(Item, #passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:info(Item, BQS).
+
+invoke(Mod, {P, Fun}, State = #state{bq = BQ}) ->
+ pick1(fun (_P, BQSN) -> BQ:invoke(Mod, Fun, BQSN) end, P, State);
+invoke(Mod, Fun, State = #state{bq = BQ, max_priority = P}) ->
+ pick1(fun (_P, BQSN) -> BQ:invoke(Mod, Fun, BQSN) end, P, State);
+invoke(Mod, Fun, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(invoke(Mod, Fun, BQS)).
+
+is_duplicate(Msg, State = #state{bq = BQ}) ->
+ pick2(fun (_P, BQSN) -> BQ:is_duplicate(Msg, BQSN) end, Msg, State);
+is_duplicate(Msg, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(is_duplicate(Msg, BQS)).
+
+set_queue_mode(Mode, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:set_queue_mode(Mode, BQSN) end, State);
+set_queue_mode(Mode, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(set_queue_mode(Mode, BQS)).
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, #state{bqss = [{MaxP, _} |_]}) ->
+ MsgsByPriority = partition_publish_delivered_batch(Msgs, MaxP),
+ lists:foldl(fun (Acks, MAs) ->
+ {P, _AckTag} = hd(Acks),
+ Pubs = maps:get(P, MsgsByPriority),
+ MAs0 = zip_msgs_and_acks(Pubs, Acks),
+ MAs ++ MAs0
+ end, Accumulator, AckTags);
+zip_msgs_and_acks(Msgs, AckTags, Accumulator,
+ #passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:zip_msgs_and_acks(Msgs, AckTags, Accumulator, BQS).
+
+%%----------------------------------------------------------------------------
+
+bq() ->
+ {ok, RealBQ} = application:get_env(
+ rabbitmq_priority_queue, backing_queue_module),
+ RealBQ.
+
+%% Note on suffixes: Many utility functions here have suffixes telling
+%% you the arity of the return type of the BQ function they are
+%% designed to work with.
+%%
+%% 0 - BQ function returns a value and does not modify state
+%% 1 - BQ function just returns a new state
+%% 2 - BQ function returns a 2-tuple of {Result, NewState}
+%% 3 - BQ function returns a 3-tuple of {Result1, Result2, NewState}
+
+%% Fold over results
+fold0(Fun, Acc, [{P, BQSN} | Rest]) -> fold0(Fun, Fun(P, BQSN, Acc), Rest);
+fold0(_Fun, Acc, []) -> Acc.
+
+%% Do all BQs match?
+all0(Pred, BQSs) -> fold0(fun (_P, _BQSN, false) -> false;
+ (P, BQSN, true) -> Pred(P, BQSN)
+ end, true, BQSs).
+
+%% Sum results
+add0(Fun, BQSs) -> fold0(fun (P, BQSN, Acc) -> Acc + Fun(P, BQSN) end, 0, BQSs).
+
+%% Apply for all states
+foreach1(Fun, State = #state{bqss = BQSs}) ->
+ a(State#state{bqss = foreach1(Fun, BQSs, [])}).
+foreach1(Fun, [{Priority, BQSN} | Rest], BQSAcc) ->
+ BQSN1 = Fun(Priority, BQSN),
+ foreach1(Fun, Rest, [{Priority, BQSN1} | BQSAcc]);
+foreach1(_Fun, [], BQSAcc) ->
+ lists:reverse(BQSAcc).
+
+%% For a given thing, just go to its BQ
+pick1(Fun, Prioritisable, #state{bqss = BQSs} = State) ->
+ {P, BQSN} = priority_bq(Prioritisable, BQSs),
+ a(State#state{bqss = bq_store(P, Fun(P, BQSN), BQSs)}).
+
+%% Fold over results
+fold2(Fun, Acc, State = #state{bqss = BQSs}) ->
+ {Res, BQSs1} = fold2(Fun, Acc, BQSs, []),
+ {Res, a(State#state{bqss = BQSs1})}.
+
+fold2(Fun, Acc, [{P, BQSN} | Rest], BQSAcc) ->
+ {Acc1, BQSN1} = Fun(P, BQSN, Acc),
+ fold2(Fun, Acc1, Rest, [{P, BQSN1} | BQSAcc]);
+fold2(_Fun, Acc, [], BQSAcc) ->
+ {Acc, lists:reverse(BQSAcc)}.
+
+%% Fold over results assuming results are lists and we want to append them
+fold_append2(Fun, State) ->
+ fold2(fun (P, BQSN, Acc) ->
+ {Res, BQSN1} = Fun(P, BQSN),
+ {Res ++ Acc, BQSN1}
+ end, [], State).
+
+%% Fold over results assuming results are numbers and we want to sum them
+fold_add2(Fun, State) ->
+ fold2(fun (P, BQSN, Acc) ->
+ {Res, BQSN1} = Fun(P, BQSN),
+ {add_maybe_infinity(Res, Acc), BQSN1}
+ end, 0, State).
+
+%% Fold over results assuming results are numbers and we want the minimum
+fold_min2(Fun, State) ->
+ fold2(fun (P, BQSN, Acc) ->
+ {Res, BQSN1} = Fun(P, BQSN),
+ {erlang:min(Res, Acc), BQSN1}
+ end, infinity, State).
+
+%% Fold over results assuming results are lists and we want to append
+%% them, and also that we have some AckTags we want to pass in to each
+%% invocation.
+fold_by_acktags2(Fun, AckTags, State) ->
+ AckTagsByPriority = partition_acktags(AckTags),
+ fold_append2(fun (P, BQSN) ->
+ case maps:find(P, AckTagsByPriority) of
+ {ok, AckTagsN} -> Fun(AckTagsN, BQSN);
+ error -> {[], BQSN}
+ end
+ end, State).
+
+%% For a given thing, just go to its BQ
+pick2(Fun, Prioritisable, #state{bqss = BQSs} = State) ->
+ {P, BQSN} = priority_bq(Prioritisable, BQSs),
+ {Res, BQSN1} = Fun(P, BQSN),
+ {Res, a(State#state{bqss = bq_store(P, BQSN1, BQSs)})}.
+
+%% Run through BQs in priority order until one does not return
+%% {NotFound, NewState} or we have gone through them all.
+find2(Fun, NotFound, State = #state{bqss = BQSs}) ->
+ {Res, BQSs1} = find2(Fun, NotFound, BQSs, []),
+ {Res, a(State#state{bqss = BQSs1})}.
+find2(Fun, NotFound, [{P, BQSN} | Rest], BQSAcc) ->
+ case Fun(P, BQSN) of
+ {NotFound, BQSN1} -> find2(Fun, NotFound, Rest, [{P, BQSN1} | BQSAcc]);
+ {Res, BQSN1} -> {Res, lists:reverse([{P, BQSN1} | BQSAcc]) ++ Rest}
+ end;
+find2(_Fun, NotFound, [], BQSAcc) ->
+ {NotFound, lists:reverse(BQSAcc)}.
+
+%% Run through BQs in priority order like find2 but also folding as we go.
+findfold3(Fun, Acc, NotFound, State = #state{bqss = BQSs}) ->
+ {Res, Acc1, BQSs1} = findfold3(Fun, Acc, NotFound, BQSs, []),
+ {Res, Acc1, a(State#state{bqss = BQSs1})}.
+findfold3(Fun, Acc, NotFound, [{P, BQSN} | Rest], BQSAcc) ->
+ case Fun(P, BQSN, Acc) of
+ {NotFound, Acc1, BQSN1} ->
+ findfold3(Fun, Acc1, NotFound, Rest, [{P, BQSN1} | BQSAcc]);
+ {Res, Acc1, BQSN1} ->
+ {Res, Acc1, lists:reverse([{P, BQSN1} | BQSAcc]) ++ Rest}
+ end;
+findfold3(_Fun, Acc, NotFound, [], BQSAcc) ->
+ {NotFound, Acc, lists:reverse(BQSAcc)}.
+
+bq_fetch(P, []) -> exit({not_found, P});
+bq_fetch(P, [{P, BQSN} | _]) -> {P, BQSN};
+bq_fetch(P, [{_, _BQSN} | T]) -> bq_fetch(P, T).
+
+bq_store(P, BQS, BQSs) ->
+ [{PN, case PN of
+ P -> BQS;
+ _ -> BQSN
+ end} || {PN, BQSN} <- BQSs].
+
+%% Assertion: the per-priority BQs must stay in descending priority order.
+a(State = #state{bqss = BQSs}) ->
+ Ps = [P || {P, _} <- BQSs],
+ case lists:reverse(lists:usort(Ps)) of
+ Ps -> State;
+ _ -> exit({bad_order, Ps})
+ end.
+
+%%----------------------------------------------------------------------------
+partition_publish_batch(Publishes, MaxP) ->
+ partition_publishes(
+ Publishes, fun ({Msg, _, _}) -> Msg end, MaxP).
+
+partition_publish_delivered_batch(Publishes, MaxP) ->
+ partition_publishes(
+ Publishes, fun ({Msg, _}) -> Msg end, MaxP).
+
+partition_publishes(Publishes, ExtractMsg, MaxP) ->
+ Partitioned =
+ lists:foldl(fun (Pub, Dict) ->
+ Msg = ExtractMsg(Pub),
+ rabbit_misc:maps_cons(priority(Msg, MaxP), Pub, Dict)
+ end, maps:new(), Publishes),
+ maps:map(fun (_P, RevPubs) ->
+ lists:reverse(RevPubs)
+ end, Partitioned).
+
+
+priority_bq(Priority, [{MaxP, _} | _] = BQSs) ->
+ bq_fetch(priority(Priority, MaxP), BQSs).
+
+%% Messages with a priority which is higher than the queue's maximum are treated
+%% as if they were published with the maximum priority.
+priority(undefined, _MaxP) ->
+ 0;
+priority(Priority, MaxP) when is_integer(Priority), Priority =< MaxP ->
+ Priority;
+priority(Priority, MaxP) when is_integer(Priority), Priority > MaxP ->
+ MaxP;
+priority(#basic_message{content = Content}, MaxP) ->
+ priority(rabbit_binary_parser:ensure_content_decoded(Content), MaxP);
+priority(#content{properties = Props}, MaxP) ->
+ #'P_basic'{priority = Priority0} = Props,
+ priority(Priority0, MaxP).
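+%% e.g. priority(7, 5) returns 5 and priority(undefined, 5) returns 0
+%% (messages with no priority set go to the lowest-priority sub-BQ).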
+
+add_maybe_infinity(infinity, _) -> infinity;
+add_maybe_infinity(_, infinity) -> infinity;
+add_maybe_infinity(A, B) -> A + B.
+
+partition_acktags(AckTags) -> partition_acktags(AckTags, maps:new()).
+
+partition_acktags([], Partitioned) ->
+ maps:map(fun (_P, RevAckTags) ->
+ lists:reverse(RevAckTags)
+ end, Partitioned);
+partition_acktags([{P, AckTag} | Rest], Partitioned) ->
+ partition_acktags(Rest, rabbit_misc:maps_cons(P, AckTag, Partitioned)).
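+%% e.g. partition_acktags([{1, T1}, {0, T2}, {1, T3}]) returns
+%% #{1 => [T1, T3], 0 => [T2]}, preserving per-priority order.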
+
+priority_on_acktags(P, AckTags) ->
+ [case Tag of
+ _ when is_integer(Tag) -> {P, Tag};
+ _ -> Tag
+ end || Tag <- AckTags].
+
+combine_status(P, New, nothing) ->
+ [{priority_lengths, [{P, proplists:get_value(len, New)}]} | New];
+combine_status(P, New, Old) ->
+ Combined = [{K, cse(V, proplists:get_value(K, Old))} || {K, V} <- New],
+ Lens = [{P, proplists:get_value(len, New)} |
+ proplists:get_value(priority_lengths, Old)],
+ [{priority_lengths, Lens} | Combined].
+
+cse(infinity, _) -> infinity;
+cse(_, infinity) -> infinity;
+%% queue modes
+cse(_, default) -> default;
+cse(default, _) -> default;
+cse(_, lazy) -> lazy;
+cse(lazy, _) -> lazy;
+%% numerical stats
+cse(A, B) when is_number(A) -> A + B;
+cse({delta, _, _, _, _}, _) -> {delta, todo, todo, todo, todo};
+cse(_, _) -> undefined.
+
+%% When asked about 'head_message_timestamp' for this priority queue, we
+%% walk all the backing queues, starting with the highest priority. Once a
+%% backing queue that has messages (ready or unacknowledged) is found, its
+%% 'head_message_timestamp' is returned even if it is null.
+
+find_head_message_timestamp(BQ, [{_, BQSN} | Rest], Timestamp) ->
+ MsgCount = BQ:len(BQSN) + BQ:info(messages_unacknowledged_ram, BQSN),
+ if
+ MsgCount =/= 0 -> BQ:info(head_message_timestamp, BQSN);
+ true -> find_head_message_timestamp(BQ, Rest, Timestamp)
+ end;
+find_head_message_timestamp(_, [], Timestamp) ->
+ Timestamp.
+
+zip_msgs_and_acks(Pubs, AckTags) ->
+ lists:zipwith(
+ fun ({#basic_message{ id = Id }, _Props}, AckTag) ->
+ {Id, AckTag}
+ end, Pubs, AckTags).
diff --git a/deps/rabbit/src/rabbit_queue_consumers.erl b/deps/rabbit/src/rabbit_queue_consumers.erl
new file mode 100644
index 0000000000..4f826f72e8
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_consumers.erl
@@ -0,0 +1,568 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_consumers).
+
+-export([new/0, max_active_priority/1, inactive/1, all/1, all/3, count/0,
+ unacknowledged_message_count/0, add/10, remove/3, erase_ch/2,
+ send_drained/0, deliver/5, record_ack/3, subtract_acks/3,
+ possibly_unblock/3,
+ resume_fun/0, notify_sent_fun/1, activate_limit_fun/0,
+ credit/6, utilisation/1, is_same/3, get_consumer/1, get/3,
+ consumer_tag/1, get_infos/1]).
+
+%%----------------------------------------------------------------------------
+
+-define(QUEUE, lqueue).
+
+-define(UNSENT_MESSAGE_LIMIT, 200).
+
+%% Utilisation average calculations are all in μs.
+-define(USE_AVG_HALF_LIFE, 1000000.0).
+
+-record(state, {consumers, use}).
+
+-record(consumer, {tag, ack_required, prefetch, args, user}).
+
+%% These are held in our process dictionary
+-record(cr, {ch_pid,
+ monitor_ref,
+ acktags,
+ consumer_count,
+ %% Queue of {ChPid, #consumer{}} for consumers which have
+ %% been blocked (rate/prefetch limited) for any reason
+ blocked_consumers,
+ %% The limiter itself
+ limiter,
+ %% Internal flow control for queue -> writer
+ unsent_message_count}).
+
+%%----------------------------------------------------------------------------
+
+-type time_micros() :: non_neg_integer().
+-type ratio() :: float().
+-type state() :: #state{consumers :: priority_queue:q(),
+ use :: {'inactive',
+ time_micros(), time_micros(), ratio()} |
+ {'active', time_micros(), ratio()}}.
+-type consumer() :: #consumer{tag::rabbit_types:ctag(), ack_required::boolean(),
+ prefetch::non_neg_integer(), args::rabbit_framing:amqp_table(),
+ user::rabbit_types:username()}.
+-type ch() :: pid().
+-type ack() :: non_neg_integer().
+-type cr_fun() :: fun ((#cr{}) -> #cr{}).
+-type fetch_result() :: {rabbit_types:basic_message(), boolean(), ack()}.
+
+%%----------------------------------------------------------------------------
+
+-spec new() -> state().
+
+new() -> #state{consumers = priority_queue:new(),
+ use = {active,
+ erlang:monotonic_time(micro_seconds),
+ 1.0}}.
+
+-spec max_active_priority(state()) -> integer() | 'infinity' | 'empty'.
+
+max_active_priority(#state{consumers = Consumers}) ->
+ priority_queue:highest(Consumers).
+
+-spec inactive(state()) -> boolean().
+
+inactive(#state{consumers = Consumers}) ->
+ priority_queue:is_empty(Consumers).
+
+-spec all(state()) -> [{ch(), rabbit_types:ctag(), boolean(),
+ non_neg_integer(), boolean(), atom(),
+ rabbit_framing:amqp_table(), rabbit_types:username()}].
+
+all(State) ->
+ all(State, none, false).
+
+all(#state{consumers = Consumers}, SingleActiveConsumer, SingleActiveConsumerOn) ->
+ lists:foldl(fun (C, Acc) -> consumers(C#cr.blocked_consumers, SingleActiveConsumer, SingleActiveConsumerOn, Acc) end,
+ consumers(Consumers, SingleActiveConsumer, SingleActiveConsumerOn, []), all_ch_record()).
+
+consumers(Consumers, SingleActiveConsumer, SingleActiveConsumerOn, Acc) ->
+ ActiveActivityStatusFun = case SingleActiveConsumerOn of
+ true ->
+ fun({ChPid, Consumer}) ->
+ case SingleActiveConsumer of
+ {ChPid, Consumer} ->
+ {true, single_active};
+ _ ->
+ {false, waiting}
+ end
+ end;
+ false ->
+ fun(_) -> {true, up} end
+ end,
+ priority_queue:fold(
+ fun ({ChPid, Consumer}, _P, Acc1) ->
+ #consumer{tag = CTag, ack_required = Ack, prefetch = Prefetch,
+ args = Args, user = Username} = Consumer,
+ {Active, ActivityStatus} = ActiveActivityStatusFun({ChPid, Consumer}),
+ [{ChPid, CTag, Ack, Prefetch, Active, ActivityStatus, Args, Username} | Acc1]
+ end, Acc, Consumers).
+
+-spec count() -> non_neg_integer().
+
+count() -> lists:sum([Count || #cr{consumer_count = Count} <- all_ch_record()]).
+
+-spec unacknowledged_message_count() -> non_neg_integer().
+
+unacknowledged_message_count() ->
+ lists:sum([?QUEUE:len(C#cr.acktags) || C <- all_ch_record()]).
+
+-spec add(ch(), rabbit_types:ctag(), boolean(), pid(), boolean(),
+ non_neg_integer(), rabbit_framing:amqp_table(), boolean(),
+ rabbit_types:username(), state())
+ -> state().
+
+add(ChPid, CTag, NoAck, LimiterPid, LimiterActive, Prefetch, Args, IsEmpty,
+ Username, State = #state{consumers = Consumers,
+ use = CUInfo}) ->
+ C = #cr{consumer_count = Count,
+ limiter = Limiter} = ch_record(ChPid, LimiterPid),
+ Limiter1 = case LimiterActive of
+ true -> rabbit_limiter:activate(Limiter);
+ false -> Limiter
+ end,
+ C1 = C#cr{consumer_count = Count + 1, limiter = Limiter1},
+ update_ch_record(
+ case parse_credit_args(Prefetch, Args) of
+ {0, auto} -> C1;
+ {_Credit, auto} when NoAck -> C1;
+ {Credit, Mode} -> credit_and_drain(
+ C1, CTag, Credit, Mode, IsEmpty)
+ end),
+ Consumer = #consumer{tag = CTag,
+ ack_required = not NoAck,
+ prefetch = Prefetch,
+ args = Args,
+ user = Username},
+ State#state{consumers = add_consumer({ChPid, Consumer}, Consumers),
+ use = update_use(CUInfo, active)}.
+
+-spec remove(ch(), rabbit_types:ctag(), state()) ->
+ 'not_found' | state().
+
+remove(ChPid, CTag, State = #state{consumers = Consumers}) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ not_found;
+ C = #cr{consumer_count = Count,
+ limiter = Limiter,
+ blocked_consumers = Blocked} ->
+ Blocked1 = remove_consumer(ChPid, CTag, Blocked),
+ Limiter1 = case Count of
+ 1 -> rabbit_limiter:deactivate(Limiter);
+ _ -> Limiter
+ end,
+ Limiter2 = rabbit_limiter:forget_consumer(Limiter1, CTag),
+ update_ch_record(C#cr{consumer_count = Count - 1,
+ limiter = Limiter2,
+ blocked_consumers = Blocked1}),
+ State#state{consumers =
+ remove_consumer(ChPid, CTag, Consumers)}
+ end.
+
+-spec erase_ch(ch(), state()) ->
+ 'not_found' | {[ack()], [rabbit_types:ctag()],
+ state()}.
+
+erase_ch(ChPid, State = #state{consumers = Consumers}) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ not_found;
+ C = #cr{ch_pid = ChPid,
+ acktags = ChAckTags,
+ blocked_consumers = BlockedQ} ->
+ All = priority_queue:join(Consumers, BlockedQ),
+ ok = erase_ch_record(C),
+ Filtered = priority_queue:filter(chan_pred(ChPid, true), All),
+ {[AckTag || {AckTag, _CTag} <- ?QUEUE:to_list(ChAckTags)],
+ tags(priority_queue:to_list(Filtered)),
+ State#state{consumers = remove_consumers(ChPid, Consumers)}}
+ end.
+
+-spec send_drained() -> 'ok'.
+
+send_drained() -> [update_ch_record(send_drained(C)) || C <- all_ch_record()],
+ ok.
+
+-spec deliver(fun ((boolean()) -> {fetch_result(), T}),
+ rabbit_amqqueue:name(), state(), boolean(),
+ none | {ch(), rabbit_types:ctag()} | {ch(), consumer()}) ->
+ {'delivered', boolean(), T, state()} |
+ {'undelivered', boolean(), state()}.
+
+deliver(FetchFun, QName, State, SingleActiveConsumerIsOn, ActiveConsumer) ->
+ deliver(FetchFun, QName, false, State, SingleActiveConsumerIsOn, ActiveConsumer).
+
+deliver(_FetchFun, _QName, false, State, true, none) ->
+ {undelivered, false,
+ State#state{use = update_use(State#state.use, inactive)}};
+deliver(FetchFun, QName, false, State = #state{consumers = Consumers}, true, SingleActiveConsumer) ->
+ {ChPid, Consumer} = SingleActiveConsumer,
+    %% Blocked (rate/prefetch limited) consumers are removed from the queue
+    %% state, but not the exclusive_consumer field, so we need to do this
+    %% check to avoid adding the exclusive consumer to the channel record
+    %% over and over.
+ case is_blocked(SingleActiveConsumer) of
+ true ->
+ {undelivered, false,
+ State#state{use = update_use(State#state.use, inactive)}};
+ false ->
+ case deliver_to_consumer(FetchFun, SingleActiveConsumer, QName) of
+ {delivered, R} ->
+ {delivered, false, R, State};
+ undelivered ->
+ {ChPid, Consumer} = SingleActiveConsumer,
+ Consumers1 = remove_consumer(ChPid, Consumer#consumer.tag, Consumers),
+ {undelivered, true,
+ State#state{consumers = Consumers1, use = update_use(State#state.use, inactive)}}
+ end
+ end;
+deliver(FetchFun, QName, ConsumersChanged,
+ State = #state{consumers = Consumers}, false, _SingleActiveConsumer) ->
+ case priority_queue:out_p(Consumers) of
+ {empty, _} ->
+ {undelivered, ConsumersChanged,
+ State#state{use = update_use(State#state.use, inactive)}};
+ {{value, QEntry, Priority}, Tail} ->
+ case deliver_to_consumer(FetchFun, QEntry, QName) of
+ {delivered, R} ->
+ {delivered, ConsumersChanged, R,
+ State#state{consumers = priority_queue:in(QEntry, Priority,
+ Tail)}};
+ undelivered ->
+ deliver(FetchFun, QName, true,
+ State#state{consumers = Tail}, false, _SingleActiveConsumer)
+ end
+ end.
+
+deliver_to_consumer(FetchFun, E = {ChPid, Consumer}, QName) ->
+ C = lookup_ch(ChPid),
+ case is_ch_blocked(C) of
+ true ->
+ block_consumer(C, E),
+ undelivered;
+ false -> case rabbit_limiter:can_send(C#cr.limiter,
+ Consumer#consumer.ack_required,
+ Consumer#consumer.tag) of
+ {suspend, Limiter} ->
+ block_consumer(C#cr{limiter = Limiter}, E),
+ undelivered;
+ {continue, Limiter} ->
+ {delivered, deliver_to_consumer(
+ FetchFun, Consumer,
+ C#cr{limiter = Limiter}, QName)}
+ end
+ end.
+
+deliver_to_consumer(FetchFun,
+ #consumer{tag = CTag,
+ ack_required = AckRequired},
+ C = #cr{ch_pid = ChPid,
+ acktags = ChAckTags,
+ unsent_message_count = Count},
+ QName) ->
+ {{Message, IsDelivered, AckTag}, R} = FetchFun(AckRequired),
+ rabbit_channel:deliver(ChPid, CTag, AckRequired,
+ {QName, self(), AckTag, IsDelivered, Message}),
+ ChAckTags1 = case AckRequired of
+ true -> ?QUEUE:in({AckTag, CTag}, ChAckTags);
+ false -> ChAckTags
+ end,
+ update_ch_record(C#cr{acktags = ChAckTags1,
+ unsent_message_count = Count + 1}),
+ R.
+
+is_blocked(Consumer = {ChPid, _C}) ->
+ #cr{blocked_consumers = BlockedConsumers} = lookup_ch(ChPid),
+ priority_queue:member(Consumer, BlockedConsumers).
+
+-spec record_ack(ch(), pid(), ack()) -> 'ok'.
+
+record_ack(ChPid, LimiterPid, AckTag) ->
+ C = #cr{acktags = ChAckTags} = ch_record(ChPid, LimiterPid),
+ update_ch_record(C#cr{acktags = ?QUEUE:in({AckTag, none}, ChAckTags)}),
+ ok.
+
+-spec subtract_acks(ch(), [ack()], state()) ->
+ 'not_found' | 'unchanged' | {'unblocked', state()}.
+
+subtract_acks(ChPid, AckTags, State) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ not_found;
+ C = #cr{acktags = ChAckTags, limiter = Lim} ->
+ {CTagCounts, AckTags2} = subtract_acks(
+ AckTags, [], maps:new(), ChAckTags),
+ {Unblocked, Lim2} =
+ maps:fold(
+ fun (CTag, Count, {UnblockedN, LimN}) ->
+ {Unblocked1, LimN1} =
+ rabbit_limiter:ack_from_queue(LimN, CTag, Count),
+ {UnblockedN orelse Unblocked1, LimN1}
+ end, {false, Lim}, CTagCounts),
+ C2 = C#cr{acktags = AckTags2, limiter = Lim2},
+ case Unblocked of
+ true -> unblock(C2, State);
+ false -> update_ch_record(C2),
+ unchanged
+ end
+ end.
+
+subtract_acks([], [], CTagCounts, AckQ) ->
+ {CTagCounts, AckQ};
+subtract_acks([], Prefix, CTagCounts, AckQ) ->
+ {CTagCounts, ?QUEUE:join(?QUEUE:from_list(lists:reverse(Prefix)), AckQ)};
+subtract_acks([T | TL] = AckTags, Prefix, CTagCounts, AckQ) ->
+ case ?QUEUE:out(AckQ) of
+ {{value, {T, CTag}}, QTail} ->
+ subtract_acks(TL, Prefix,
+ maps:update_with(CTag, fun (Old) -> Old + 1 end, 1, CTagCounts), QTail);
+ {{value, V}, QTail} ->
+ subtract_acks(AckTags, [V | Prefix], CTagCounts, QTail);
+ {empty, _} ->
+ subtract_acks([], Prefix, CTagCounts, AckQ)
+ end.
+
+-spec possibly_unblock(cr_fun(), ch(), state()) ->
+ 'unchanged' | {'unblocked', state()}.
+
+possibly_unblock(Update, ChPid, State) ->
+ case lookup_ch(ChPid) of
+ not_found -> unchanged;
+ C -> C1 = Update(C),
+ case is_ch_blocked(C) andalso not is_ch_blocked(C1) of
+ false -> update_ch_record(C1),
+ unchanged;
+ true -> unblock(C1, State)
+ end
+ end.
+
+unblock(C = #cr{blocked_consumers = BlockedQ, limiter = Limiter},
+ State = #state{consumers = Consumers, use = Use}) ->
+ case lists:partition(
+ fun({_P, {_ChPid, #consumer{tag = CTag}}}) ->
+ rabbit_limiter:is_consumer_blocked(Limiter, CTag)
+ end, priority_queue:to_list(BlockedQ)) of
+ {_, []} ->
+ update_ch_record(C),
+ unchanged;
+ {Blocked, Unblocked} ->
+ BlockedQ1 = priority_queue:from_list(Blocked),
+ UnblockedQ = priority_queue:from_list(Unblocked),
+ update_ch_record(C#cr{blocked_consumers = BlockedQ1}),
+ {unblocked,
+ State#state{consumers = priority_queue:join(Consumers, UnblockedQ),
+ use = update_use(Use, active)}}
+ end.
+
+-spec resume_fun() -> cr_fun().
+
+resume_fun() ->
+ fun (C = #cr{limiter = Limiter}) ->
+ C#cr{limiter = rabbit_limiter:resume(Limiter)}
+ end.
+
+-spec notify_sent_fun(non_neg_integer()) -> cr_fun().
+
+notify_sent_fun(Credit) ->
+ fun (C = #cr{unsent_message_count = Count}) ->
+ C#cr{unsent_message_count = Count - Credit}
+ end.
+
+-spec activate_limit_fun() -> cr_fun().
+
+activate_limit_fun() ->
+ fun (C = #cr{limiter = Limiter}) ->
+ C#cr{limiter = rabbit_limiter:activate(Limiter)}
+ end.
+
+-spec credit(boolean(), integer(), boolean(), ch(), rabbit_types:ctag(),
+ state()) -> 'unchanged' | {'unblocked', state()}.
+
+credit(IsEmpty, Credit, Drain, ChPid, CTag, State) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ unchanged;
+ #cr{limiter = Limiter} = C ->
+ C1 = #cr{limiter = Limiter1} =
+ credit_and_drain(C, CTag, Credit, drain_mode(Drain), IsEmpty),
+ case is_ch_blocked(C1) orelse
+ (not rabbit_limiter:is_consumer_blocked(Limiter, CTag)) orelse
+ rabbit_limiter:is_consumer_blocked(Limiter1, CTag) of
+ true -> update_ch_record(C1),
+ unchanged;
+ false -> unblock(C1, State)
+ end
+ end.
+
+drain_mode(true) -> drain;
+drain_mode(false) -> manual.
+
+-spec utilisation(state()) -> ratio().
+
+utilisation(#state{use = {active, Since, Avg}}) ->
+ use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg);
+utilisation(#state{use = {inactive, Since, Active, Avg}}) ->
+ use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg).
+
+is_same(ChPid, ConsumerTag, {ChPid, #consumer{tag = ConsumerTag}}) ->
+ true;
+is_same(_ChPid, _ConsumerTag, _Consumer) ->
+ false.
+
+get_consumer(#state{consumers = Consumers}) ->
+ case priority_queue:out_p(Consumers) of
+ {{value, Consumer, _Priority}, _Tail} -> Consumer;
+ {empty, _} -> undefined
+ end.
+
+-spec get(ch(), rabbit_types:ctag(), state()) -> undefined | consumer().
+
+get(ChPid, ConsumerTag, #state{consumers = Consumers}) ->
+ Consumers1 = priority_queue:filter(fun ({CP, #consumer{tag = CT}}) ->
+ (CP == ChPid) and (CT == ConsumerTag)
+ end, Consumers),
+ case priority_queue:out_p(Consumers1) of
+ {empty, _} -> undefined;
+ {{value, Consumer, _Priority}, _Tail} -> Consumer
+ end.
+
+-spec get_infos(consumer()) -> term().
+
+get_infos(Consumer) ->
+ {Consumer#consumer.tag,Consumer#consumer.ack_required,
+ Consumer#consumer.prefetch, Consumer#consumer.args}.
+
+-spec consumer_tag(consumer()) -> rabbit_types:ctag().
+
+consumer_tag(#consumer{tag = CTag}) ->
+ CTag.
+
+
+
+%%----------------------------------------------------------------------------
+
+parse_credit_args(Default, Args) ->
+ case rabbit_misc:table_lookup(Args, <<"x-credit">>) of
+ {table, T} -> case {rabbit_misc:table_lookup(T, <<"credit">>),
+ rabbit_misc:table_lookup(T, <<"drain">>)} of
+ {{long, C}, {bool, D}} -> {C, drain_mode(D)};
+ _ -> {Default, auto}
+ end;
+ undefined -> {Default, auto}
+ end.
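+%% e.g. consumer arguments containing
+%%   {<<"x-credit">>, table, [{<<"credit">>, long, 5}, {<<"drain">>, bool, true}]}
+%% yield {5, drain}; anything else falls back to {Default, auto}, where
+%% Default is the prefetch passed in by the caller.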
+
+lookup_ch(ChPid) ->
+ case get({ch, ChPid}) of
+ undefined -> not_found;
+ C -> C
+ end.
+
+ch_record(ChPid, LimiterPid) ->
+ Key = {ch, ChPid},
+ case get(Key) of
+ undefined -> MonitorRef = erlang:monitor(process, ChPid),
+ Limiter = rabbit_limiter:client(LimiterPid),
+ C = #cr{ch_pid = ChPid,
+ monitor_ref = MonitorRef,
+ acktags = ?QUEUE:new(),
+ consumer_count = 0,
+ blocked_consumers = priority_queue:new(),
+ limiter = Limiter,
+ unsent_message_count = 0},
+ put(Key, C),
+ C;
+ C = #cr{} -> C
+ end.
+
+update_ch_record(C = #cr{consumer_count = ConsumerCount,
+ acktags = ChAckTags,
+ unsent_message_count = UnsentMessageCount}) ->
+ case {?QUEUE:is_empty(ChAckTags), ConsumerCount, UnsentMessageCount} of
+ {true, 0, 0} -> ok = erase_ch_record(C);
+ _ -> ok = store_ch_record(C)
+ end,
+ C.
+
+store_ch_record(C = #cr{ch_pid = ChPid}) ->
+ put({ch, ChPid}, C),
+ ok.
+
+erase_ch_record(#cr{ch_pid = ChPid, monitor_ref = MonitorRef}) ->
+ erlang:demonitor(MonitorRef),
+ erase({ch, ChPid}),
+ ok.
+
+all_ch_record() -> [C || {{ch, _}, C} <- get()].
+
+block_consumer(C = #cr{blocked_consumers = Blocked}, QEntry) ->
+ update_ch_record(C#cr{blocked_consumers = add_consumer(QEntry, Blocked)}).
+
+is_ch_blocked(#cr{unsent_message_count = Count, limiter = Limiter}) ->
+ Count >= ?UNSENT_MESSAGE_LIMIT orelse rabbit_limiter:is_suspended(Limiter).
+
+send_drained(C = #cr{ch_pid = ChPid, limiter = Limiter}) ->
+ case rabbit_limiter:drained(Limiter) of
+ {[], Limiter} -> C;
+ {CTagCredit, Limiter2} -> rabbit_channel:send_drained(
+ ChPid, CTagCredit),
+ C#cr{limiter = Limiter2}
+ end.
+
+credit_and_drain(C = #cr{ch_pid = ChPid, limiter = Limiter},
+ CTag, Credit, Mode, IsEmpty) ->
+ case rabbit_limiter:credit(Limiter, CTag, Credit, Mode, IsEmpty) of
+ {true, Limiter1} -> rabbit_channel:send_drained(ChPid,
+ [{CTag, Credit}]),
+ C#cr{limiter = Limiter1};
+ {false, Limiter1} -> C#cr{limiter = Limiter1}
+ end.
+
+tags(CList) -> [CTag || {_P, {_ChPid, #consumer{tag = CTag}}} <- CList].
+
+add_consumer({ChPid, Consumer = #consumer{args = Args}}, Queue) ->
+ Priority = case rabbit_misc:table_lookup(Args, <<"x-priority">>) of
+ {_, P} -> P;
+ _ -> 0
+ end,
+ priority_queue:in({ChPid, Consumer}, Priority, Queue).
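+%% e.g. a consumer whose arguments include {<<"x-priority">>, long, 10} is
+%% enqueued at priority 10; consumers without the argument default to 0.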
+
+remove_consumer(ChPid, CTag, Queue) ->
+ priority_queue:filter(fun ({CP, #consumer{tag = CT}}) ->
+ (CP /= ChPid) or (CT /= CTag)
+ end, Queue).
+
+remove_consumers(ChPid, Queue) ->
+ priority_queue:filter(chan_pred(ChPid, false), Queue).
+
+chan_pred(ChPid, Want) ->
+ fun ({CP, _Consumer}) when CP =:= ChPid -> Want;
+ (_) -> not Want
+ end.
+
+update_use({inactive, _, _, _} = CUInfo, inactive) ->
+ CUInfo;
+update_use({active, _, _} = CUInfo, active) ->
+ CUInfo;
+update_use({active, Since, Avg}, inactive) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {inactive, Now, Now - Since, Avg};
+update_use({inactive, Since, Active, Avg}, active) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {active, Now, use_avg(Active, Now - Since, Avg)}.
+
+use_avg(0, 0, Avg) ->
+ Avg;
+use_avg(Active, Inactive, Avg) ->
+ Time = Inactive + Active,
+ rabbit_misc:moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg).
diff --git a/deps/rabbit/src/rabbit_queue_decorator.erl b/deps/rabbit/src/rabbit_queue_decorator.erl
new file mode 100644
index 0000000000..cbb50456c1
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_decorator.erl
@@ -0,0 +1,72 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_decorator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([select/1, set/1, register/2, unregister/1]).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+%%----------------------------------------------------------------------------
+
+-callback startup(amqqueue:amqqueue()) -> 'ok'.
+
+-callback shutdown(amqqueue:amqqueue()) -> 'ok'.
+
+-callback policy_changed(amqqueue:amqqueue(), amqqueue:amqqueue()) ->
+ 'ok'.
+
+-callback active_for(amqqueue:amqqueue()) -> boolean().
+
+%% called with Queue, MaxActivePriority, IsEmpty
+-callback consumer_state_changed(
+ amqqueue:amqqueue(), integer(), boolean()) -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
+
+select(Modules) ->
+ [M || M <- Modules, code:which(M) =/= non_existing].
+
+set(Q) when ?is_amqqueue(Q) ->
+ Decorators = [D || D <- list(), D:active_for(Q)],
+ amqqueue:set_decorators(Q, Decorators).
+
+list() -> [M || {_, M} <- rabbit_registry:lookup_all(queue_decorator)].
+
+register(TypeName, ModuleName) ->
+ rabbit_registry:register(queue_decorator, TypeName, ModuleName),
+ [maybe_recover(Q) || Q <- rabbit_amqqueue:list()],
+ ok.
+
+unregister(TypeName) ->
+ rabbit_registry:unregister(queue_decorator, TypeName),
+ [maybe_recover(Q) || Q <- rabbit_amqqueue:list()],
+ ok.
+
+maybe_recover(Q0) when ?is_amqqueue(Q0) ->
+ Name = amqqueue:get_name(Q0),
+ Decs0 = amqqueue:get_decorators(Q0),
+ Q1 = set(Q0),
+ Decs1 = amqqueue:get_decorators(Q1),
+ Old = lists:sort(select(Decs0)),
+ New = lists:sort(select(Decs1)),
+ case New of
+ Old ->
+ ok;
+ _ ->
+ %% TODO LRB JSP 160169569 should startup be passed Q1 here?
+ [M:startup(Q0) || M <- New -- Old],
+ rabbit_amqqueue:update_decorators(Name)
+ end.
diff --git a/deps/rabbit/src/rabbit_queue_index.erl b/deps/rabbit/src/rabbit_queue_index.erl
new file mode 100644
index 0000000000..faab4380b5
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_index.erl
@@ -0,0 +1,1521 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_index).
+
+-export([erase/1, init/3, reset_state/1, recover/6,
+ terminate/3, delete_and_terminate/1,
+ pre_publish/7, flush_pre_publish_cache/2,
+ publish/6, deliver/2, ack/2, sync/1, needs_sync/1, flush/1,
+ read/3, next_segment_boundary/1, bounds/1, start/2, stop/1]).
+
+-export([add_queue_ttl/0, avoid_zeroes/0, store_msg_size/0, store_msg/0]).
+-export([scan_queue_segments/3, scan_queue_segments/4]).
+
+%% Migrates from global to per-vhost message stores
+-export([move_to_per_vhost_stores/1,
+ update_recovery_term/2,
+ read_global_recovery_terms/1,
+ cleanup_global_recovery_terms/0]).
+
+-define(CLEAN_FILENAME, "clean.dot").
+
+%%----------------------------------------------------------------------------
+
+%% The queue index is responsible for recording the order of messages
+%% within a queue on disk. As such it contains records of messages
+%% being published, delivered and acknowledged. The publish record
+%% includes the sequence ID, message ID and a small quantity of
+%% metadata about the message; the delivery and acknowledgement
+%% records just contain the sequence ID. A publish record may also
+%% contain the complete message if provided to publish/6; this allows
+%% the message store to be avoided altogether for small messages. In
+%% either case the publish record is stored in memory in the same
+%% serialised format it will take on disk.
+%%
+%% Because the queue can decide at any point to send a queue entry to
+%% disk, you cannot rely on publishes appearing in order. The only
+%% thing you can rely on is a message being published, then delivered,
+%% then ack'd.
+%%
+%% In order to be able to clean up ack'd messages, we write to segment
+%% files. These files have a fixed number of entries: ?SEGMENT_ENTRY_COUNT
+%% publishes, delivers and acknowledgements. They are numbered, and so
+%% it is known that the 0th segment contains messages 0 ->
+%% ?SEGMENT_ENTRY_COUNT - 1, the 1st segment contains messages
+%% ?SEGMENT_ENTRY_COUNT -> 2*?SEGMENT_ENTRY_COUNT - 1 and so on. As
+%% such, in the segment files, we only refer to message sequence ids
+%% by the LSBs as SeqId rem ?SEGMENT_ENTRY_COUNT. This gives them a
+%% fixed size.
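+%%
+%% For example (purely illustrative, with ?SEGMENT_ENTRY_COUNT =
+%% 16384): SeqId 16385 lives in segment 1 with relative sequence id 1,
+%% since 16385 div 16384 = 1 and 16385 rem 16384 = 1; reconstructing
+%% gives 1 * 16384 + 1 = 16385 (see seq_id_to_seg_and_rel_seq_id/1 and
+%% reconstruct_seq_id/2 below).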
+%%
+%% However, transient messages which are not sent to disk at any point
+%% will cause gaps to appear in segment files. Therefore, we delete a
+%% segment file whenever the number of publishes == number of acks
+%% (note that although it is not fully enforced, it is assumed that a
+%% message will never be ack'd before it is delivered, thus this test
+%% also implies == number of delivers). In practice, this does not
+%% cause disk churn in the pathological case because of the journal
+%% and caching (see below).
+%%
+%% Because publishes, delivers and acks can occur all over, we wish to
+%% avoid lots of seeking. Therefore we have a fixed-size journal to
+%% which all actions are appended. When the number of entries in this
+%% journal reaches max_journal_entries, the journal entries are
+%% scattered out to their relevant files, and the journal is truncated
+%% to zero size. Note that entries in the journal must carry the full
+%% sequence id, thus the format of entries in the journal is different
+%% from that in the segments.
+%%
+%% The journal is also kept fully in memory, pre-segmented: the state
+%% contains a mapping from segment numbers to state-per-segment (this
+%% state is held for all segments which have been "seen": thus a
+%% segment which has been read but has no pending entries in the
+%% journal is still held in this mapping. Also note that a map is
+%% used for this mapping, not an array because with an array, you will
+%% always have entries from 0). Actions are stored directly in this
+%% state. Thus at the point of flushing the journal, firstly no
+%% reading from disk is necessary, but secondly if the known number of
+%% acks and publishes in a segment are equal, given the known state of
+%% the segment file combined with the journal, no writing needs to be
+%% done to the segment file either (in fact it is deleted if it exists
+%% at all). This is safe given that the set of acks is a subset of the
+%% set of publishes. When it is necessary to sync messages, it is
+%% sufficient to fsync on the journal: when entries are distributed
+%% from the journal to segment files, those segments appended to are
+%% fsync'd prior to the journal being truncated.
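+%%
+%% As an illustration (not a normative description), a 'del' or 'ack'
+%% journal entry is written as
+%%   <<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>
+%% i.e. 2 prefix bits followed by 62 bits of full sequence id, while a
+%% publish entry additionally carries the pub record body and the
+%% embedded message binary (see parse_journal_entries/2 below).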
+%%
+%% This module is also responsible for scanning the queue index files
+%% and seeding the message store on start up.
+%%
+%% Note that in general, the representation of a message's state as
+%% the tuple: {('no_pub'|{IsPersistent, Bin, MsgBin}),
+%% ('del'|'no_del'), ('ack'|'no_ack')} is richer than strictly
+%% necessary for most operations. However, for startup, and to ensure
+%% the safe and correct combination of journal entries with entries
+%% read from the segment on disk, this richer representation vastly
+%% simplifies and clarifies the code.
+%%
+%% For notes on Clean Shutdown and startup, see documentation in
+%% rabbit_variable_queue.
+%%
+%%----------------------------------------------------------------------------
+
+%% ---- Journal details ----
+
+-define(JOURNAL_FILENAME, "journal.jif").
+-define(QUEUE_NAME_STUB_FILE, ".queue_name").
+
+-define(PUB_PERSIST_JPREFIX, 2#00).
+-define(PUB_TRANS_JPREFIX, 2#01).
+-define(DEL_JPREFIX, 2#10).
+-define(ACK_JPREFIX, 2#11).
+-define(JPREFIX_BITS, 2).
+-define(SEQ_BYTES, 8).
+-define(SEQ_BITS, ((?SEQ_BYTES * 8) - ?JPREFIX_BITS)).
+
+%% ---- Segment details ----
+
+-define(SEGMENT_EXTENSION, ".idx").
+
+%% TODO: The segment size could be made configurable, but deriving all the
+%% other values is quite hairy and quite possibly noticeably less
+%% efficient, depending on how clever the compiler is when it comes to
+%% binary generation/matching with constant vs variable lengths.
+
+-define(REL_SEQ_BITS, 14).
+%% calculated as trunc(math:pow(2, ?REL_SEQ_BITS)).
+-define(SEGMENT_ENTRY_COUNT, 16384).
+
+%% seq only is binary 01 followed by 14 bits of rel seq id
+%% (range: 0 - 16383)
+-define(REL_SEQ_ONLY_PREFIX, 01).
+-define(REL_SEQ_ONLY_PREFIX_BITS, 2).
+-define(REL_SEQ_ONLY_RECORD_BYTES, 2).
+
+%% publish record is binary 1 followed by a bit for is_persistent,
+%% then 14 bits of rel seq id, 128 bits of md5sum msg id, 64 bits for
+%% message expiry and 32 bits of size.
+-define(PUB_PREFIX, 1).
+-define(PUB_PREFIX_BITS, 1).
+
+-define(EXPIRY_BYTES, 8).
+-define(EXPIRY_BITS, (?EXPIRY_BYTES * 8)).
+-define(NO_EXPIRY, 0).
+
+-define(MSG_ID_BYTES, 16). %% md5sum is 128 bit or 16 bytes
+-define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)).
+
+%% This is the size of the message body content, for stats
+-define(SIZE_BYTES, 4).
+-define(SIZE_BITS, (?SIZE_BYTES * 8)).
+
+%% This is the size of the message record embedded in the queue
+%% index. If 0, the message can be found in the message store.
+-define(EMBEDDED_SIZE_BYTES, 4).
+-define(EMBEDDED_SIZE_BITS, (?EMBEDDED_SIZE_BYTES * 8)).
+
+%% 16 bytes for md5sum + 8 for expiry + 4 for size
+-define(PUB_RECORD_BODY_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES + ?SIZE_BYTES)).
+%% + 4 for the embedded message size
+-define(PUB_RECORD_SIZE_BYTES, (?PUB_RECORD_BODY_BYTES + ?EMBEDDED_SIZE_BYTES)).
+
+%% 2 bytes for the prefix bits and relative seq id
+-define(PUB_RECORD_PREFIX_BYTES, 2).
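+
+%% Putting the above together, an on-disk segment publish entry (as
+%% assembled by entry_to_segment/3 below) looks, illustratively, like:
+%%   <<?PUB_PREFIX:1, IsPersistent:1, RelSeq:14,
+%%     MsgId:128, Expiry:64, Size:32,
+%%     EmbeddedSize:32, MsgBin:EmbeddedSize/binary>>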
+
+%% ---- misc ----
+
+-define(PUB, {_, _, _}). %% {IsPersistent, Bin, MsgBin}
+
+-define(READ_MODE, [binary, raw, read]).
+-define(WRITE_MODE, [write | ?READ_MODE]).
+
+%%----------------------------------------------------------------------------
+
+-record(qistate, {
+ %% queue directory where segment and journal files are stored
+ dir,
+ %% map of #segment records
+ segments,
+ %% journal file handle obtained from/used by file_handle_cache
+ journal_handle,
+ %% how many not yet flushed entries are there
+ dirty_count,
+ %% this many not yet flushed journal entries will force a flush
+ max_journal_entries,
+ %% callback function invoked when a message is "handled"
+ %% by the index and potentially can be confirmed to the publisher
+ on_sync,
+ on_sync_msg,
+ %% set of IDs of unconfirmed [to publishers] messages
+ unconfirmed,
+ unconfirmed_msg,
+ %% optimisation
+ pre_publish_cache,
+ %% optimisation
+ delivered_cache,
+ %% queue name resource record
+ queue_name}).
+
+-record(segment, {
+ %% segment ID (an integer)
+ num,
+ %% segment file path (see also ?SEGMENT_EXTENSION)
+ path,
+ %% index operation log entries in this segment
+ journal_entries,
+ entries_to_segment,
+ %% counter of unacknowledged messages
+ unacked
+}).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-rabbit_upgrade({add_queue_ttl, local, []}).
+-rabbit_upgrade({avoid_zeroes, local, [add_queue_ttl]}).
+-rabbit_upgrade({store_msg_size, local, [avoid_zeroes]}).
+-rabbit_upgrade({store_msg, local, [store_msg_size]}).
+
+-type hdl() :: ('undefined' | any()).
+-type segment() :: ('undefined' |
+ #segment { num :: non_neg_integer(),
+ path :: file:filename(),
+ journal_entries :: array:array(),
+ entries_to_segment :: array:array(),
+ unacked :: non_neg_integer()
+ }).
+-type seq_id() :: integer().
+-type seg_map() :: {map(), [segment()]}.
+-type on_sync_fun() :: fun ((gb_sets:set()) -> ok).
+-type qistate() :: #qistate { dir :: file:filename(),
+ segments :: 'undefined' | seg_map(),
+ journal_handle :: hdl(),
+ dirty_count :: integer(),
+ max_journal_entries :: non_neg_integer(),
+ on_sync :: on_sync_fun(),
+ on_sync_msg :: on_sync_fun(),
+ unconfirmed :: gb_sets:set(),
+ unconfirmed_msg :: gb_sets:set(),
+ pre_publish_cache :: list(),
+ delivered_cache :: list()
+ }.
+-type contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean()).
+-type walker(A) :: fun ((A) -> 'finished' |
+ {rabbit_types:msg_id(), non_neg_integer(), A}).
+-type shutdown_terms() :: [term()] | 'non_clean_shutdown'.
+
+%%----------------------------------------------------------------------------
+%% public API
+%%----------------------------------------------------------------------------
+
+-spec erase(rabbit_amqqueue:name()) -> 'ok'.
+
+erase(#resource{ virtual_host = VHost } = Name) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ #qistate { dir = Dir } = blank_state(VHostDir, Name),
+ erase_index_dir(Dir).
+
+%% used during variable queue purge when there are no pending acks
+
+-spec reset_state(qistate()) -> qistate().
+
+reset_state(#qistate{ queue_name = Name,
+ dir = Dir,
+ on_sync = OnSyncFun,
+ on_sync_msg = OnSyncMsgFun,
+ journal_handle = JournalHdl }) ->
+ ok = case JournalHdl of
+ undefined -> ok;
+ _ -> file_handle_cache:close(JournalHdl)
+ end,
+ ok = erase_index_dir(Dir),
+ blank_state_name_dir_funs(Name, Dir, OnSyncFun, OnSyncMsgFun).
+
+-spec init(rabbit_amqqueue:name(),
+ on_sync_fun(), on_sync_fun()) -> qistate().
+
+init(#resource{ virtual_host = VHost } = Name, OnSyncFun, OnSyncMsgFun) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ State = #qistate { dir = Dir } = blank_state(VHostDir, Name),
+ false = rabbit_file:is_file(Dir), %% is_file == is file or dir
+ State#qistate{on_sync = OnSyncFun,
+ on_sync_msg = OnSyncMsgFun}.
+
+-spec recover(rabbit_amqqueue:name(), shutdown_terms(), boolean(),
+ contains_predicate(),
+ on_sync_fun(), on_sync_fun()) ->
+ {'undefined' | non_neg_integer(),
+ 'undefined' | non_neg_integer(), qistate()}.
+
+recover(#resource{ virtual_host = VHost } = Name, Terms, MsgStoreRecovered,
+ ContainsCheckFun, OnSyncFun, OnSyncMsgFun) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ State = blank_state(VHostDir, Name),
+ State1 = State #qistate{on_sync = OnSyncFun,
+ on_sync_msg = OnSyncMsgFun},
+ CleanShutdown = Terms /= non_clean_shutdown,
+ case CleanShutdown andalso MsgStoreRecovered of
+ true -> RecoveredCounts = proplists:get_value(segments, Terms, []),
+ init_clean(RecoveredCounts, State1);
+ false -> init_dirty(CleanShutdown, ContainsCheckFun, State1)
+ end.
+
+-spec terminate(rabbit_types:vhost(), [any()], qistate()) -> qistate().
+
+terminate(VHost, Terms, State = #qistate { dir = Dir }) ->
+ {SegmentCounts, State1} = terminate(State),
+ rabbit_recovery_terms:store(VHost, filename:basename(Dir),
+ [{segments, SegmentCounts} | Terms]),
+ State1.
+
+-spec delete_and_terminate(qistate()) -> qistate().
+
+delete_and_terminate(State) ->
+ {_SegmentCounts, State1 = #qistate { dir = Dir }} = terminate(State),
+ ok = rabbit_file:recursive_delete([Dir]),
+ State1.
+
+pre_publish(MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered, JournalSizeHint,
+ State = #qistate{pre_publish_cache = PPC,
+ delivered_cache = DC}) ->
+ State1 = maybe_needs_confirming(MsgProps, MsgOrId, State),
+
+ {Bin, MsgBin} = create_pub_record_body(MsgOrId, MsgProps),
+
+ PPC1 =
+ [[<<(case IsPersistent of
+ true -> ?PUB_PERSIST_JPREFIX;
+ false -> ?PUB_TRANS_JPREFIX
+ end):?JPREFIX_BITS,
+ SeqId:?SEQ_BITS, Bin/binary,
+ (size(MsgBin)):?EMBEDDED_SIZE_BITS>>, MsgBin] | PPC],
+
+ DC1 =
+ case IsDelivered of
+ true ->
+ [SeqId | DC];
+ false ->
+ DC
+ end,
+
+ State2 = add_to_journal(SeqId, {IsPersistent, Bin, MsgBin}, State1),
+ maybe_flush_pre_publish_cache(
+ JournalSizeHint,
+ State2#qistate{pre_publish_cache = PPC1,
+ delivered_cache = DC1}).
+
+%% pre_publish_cache always has at least as many elements as
+%% delivered_cache, so we only check the former in the guard.
+maybe_flush_pre_publish_cache(JournalSizeHint,
+ #qistate{pre_publish_cache = PPC} = State)
+ when length(PPC) >= ?SEGMENT_ENTRY_COUNT ->
+ flush_pre_publish_cache(JournalSizeHint, State);
+maybe_flush_pre_publish_cache(_JournalSizeHint, State) ->
+ State.
+
+flush_pre_publish_cache(JournalSizeHint, State) ->
+ State1 = flush_pre_publish_cache(State),
+ State2 = flush_delivered_cache(State1),
+ maybe_flush_journal(JournalSizeHint, State2).
+
+flush_pre_publish_cache(#qistate{pre_publish_cache = []} = State) ->
+ State;
+flush_pre_publish_cache(State = #qistate{pre_publish_cache = PPC}) ->
+ {JournalHdl, State1} = get_journal_handle(State),
+ file_handle_cache_stats:update(queue_index_journal_write),
+ ok = file_handle_cache:append(JournalHdl, lists:reverse(PPC)),
+ State1#qistate{pre_publish_cache = []}.
+
+flush_delivered_cache(#qistate{delivered_cache = []} = State) ->
+ State;
+flush_delivered_cache(State = #qistate{delivered_cache = DC}) ->
+ State1 = deliver(lists:reverse(DC), State),
+ State1#qistate{delivered_cache = []}.
+
+-spec publish(rabbit_types:msg_id(), seq_id(),
+ rabbit_types:message_properties(), boolean(),
+ non_neg_integer(), qistate()) -> qistate().
+
+publish(MsgOrId, SeqId, MsgProps, IsPersistent, JournalSizeHint, State) ->
+ {JournalHdl, State1} =
+ get_journal_handle(
+ maybe_needs_confirming(MsgProps, MsgOrId, State)),
+ file_handle_cache_stats:update(queue_index_journal_write),
+ {Bin, MsgBin} = create_pub_record_body(MsgOrId, MsgProps),
+ ok = file_handle_cache:append(
+ JournalHdl, [<<(case IsPersistent of
+ true -> ?PUB_PERSIST_JPREFIX;
+ false -> ?PUB_TRANS_JPREFIX
+ end):?JPREFIX_BITS,
+ SeqId:?SEQ_BITS, Bin/binary,
+ (size(MsgBin)):?EMBEDDED_SIZE_BITS>>, MsgBin]),
+ maybe_flush_journal(
+ JournalSizeHint,
+ add_to_journal(SeqId, {IsPersistent, Bin, MsgBin}, State1)).
+
+maybe_needs_confirming(MsgProps, MsgOrId,
+ State = #qistate{unconfirmed = UC,
+ unconfirmed_msg = UCM}) ->
+ MsgId = case MsgOrId of
+ #basic_message{id = Id} -> Id;
+ Id when is_binary(Id) -> Id
+ end,
+ ?MSG_ID_BYTES = size(MsgId),
+ case {MsgProps#message_properties.needs_confirming, MsgOrId} of
+ {true, MsgId} -> UC1 = gb_sets:add_element(MsgId, UC),
+ State#qistate{unconfirmed = UC1};
+ {true, _} -> UCM1 = gb_sets:add_element(MsgId, UCM),
+ State#qistate{unconfirmed_msg = UCM1};
+ {false, _} -> State
+ end.
+
+-spec deliver([seq_id()], qistate()) -> qistate().
+
+deliver(SeqIds, State) ->
+ deliver_or_ack(del, SeqIds, State).
+
+-spec ack([seq_id()], qistate()) -> qistate().
+
+ack(SeqIds, State) ->
+ deliver_or_ack(ack, SeqIds, State).
+
+%% This is called when there are outstanding confirms or when the
+%% queue is idle and the journal needs syncing (see needs_sync/1).
+
+-spec sync(qistate()) -> qistate().
+
+sync(State = #qistate { journal_handle = undefined }) ->
+ State;
+sync(State = #qistate { journal_handle = JournalHdl }) ->
+ ok = file_handle_cache:sync(JournalHdl),
+ notify_sync(State).
+
+-spec needs_sync(qistate()) -> 'confirms' | 'other' | 'false'.
+
+needs_sync(#qistate{journal_handle = undefined}) ->
+ false;
+needs_sync(#qistate{journal_handle = JournalHdl,
+ unconfirmed = UC,
+ unconfirmed_msg = UCM}) ->
+ case gb_sets:is_empty(UC) andalso gb_sets:is_empty(UCM) of
+ true -> case file_handle_cache:needs_sync(JournalHdl) of
+ true -> other;
+ false -> false
+ end;
+ false -> confirms
+ end.
+
+-spec flush(qistate()) -> qistate().
+
+flush(State = #qistate { dirty_count = 0 }) -> State;
+flush(State) -> flush_journal(State).
+
+-spec read(seq_id(), seq_id(), qistate()) ->
+ {[{rabbit_types:msg_id(), seq_id(),
+ rabbit_types:message_properties(),
+ boolean(), boolean()}], qistate()}.
+
+read(StartEnd, StartEnd, State) ->
+ {[], State};
+read(Start, End, State = #qistate { segments = Segments,
+ dir = Dir }) when Start =< End ->
+ %% Start is inclusive, End is exclusive.
+ LowerB = {StartSeg, _StartRelSeq} = seq_id_to_seg_and_rel_seq_id(Start),
+ UpperB = {EndSeg, _EndRelSeq} = seq_id_to_seg_and_rel_seq_id(End - 1),
+ {Messages, Segments1} =
+ lists:foldr(fun (Seg, Acc) ->
+ read_bounded_segment(Seg, LowerB, UpperB, Acc, Dir)
+ end, {[], Segments}, lists:seq(StartSeg, EndSeg)),
+ {Messages, State #qistate { segments = Segments1 }}.
+
+-spec next_segment_boundary(seq_id()) -> seq_id().
+
+next_segment_boundary(SeqId) ->
+ {Seg, _RelSeq} = seq_id_to_seg_and_rel_seq_id(SeqId),
+ reconstruct_seq_id(Seg + 1, 0).
+
+-spec bounds(qistate()) ->
+ {non_neg_integer(), non_neg_integer(), qistate()}.
+
+bounds(State = #qistate { segments = Segments }) ->
+ %% This is not particularly efficient, but only gets invoked on
+ %% queue initialisation.
+ SegNums = lists:sort(segment_nums(Segments)),
+ %% Don't bother trying to figure out the lowest seq_id, merely the
+ %% seq_id of the start of the lowest segment. That seq_id may not
+ %% actually exist, but that's fine. The important thing is that
+ %% the segment exists and the seq_id reported is on a segment
+ %% boundary.
+ %%
+ %% We also don't really care about the max seq_id. Just start the
+ %% next segment: it makes life much easier.
+ %%
+ %% SegNums is sorted, ascending.
+ {LowSeqId, NextSeqId} =
+ case SegNums of
+ [] -> {0, 0};
+ [MinSeg|_] -> {reconstruct_seq_id(MinSeg, 0),
+ reconstruct_seq_id(1 + lists:last(SegNums), 0)}
+ end,
+ {LowSeqId, NextSeqId, State}.
+
+-spec start(rabbit_types:vhost(), [rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}.
+
+start(VHost, DurableQueueNames) ->
+ ok = rabbit_recovery_terms:start(VHost),
+ {DurableTerms, DurableDirectories} =
+ lists:foldl(
+ fun(QName, {RecoveryTerms, ValidDirectories}) ->
+ DirName = queue_name_to_dir_name(QName),
+ RecoveryInfo = case rabbit_recovery_terms:read(VHost, DirName) of
+ {error, _} -> non_clean_shutdown;
+ {ok, Terms} -> Terms
+ end,
+ {[RecoveryInfo | RecoveryTerms],
+ sets:add_element(DirName, ValidDirectories)}
+ end, {[], sets:new()}, DurableQueueNames),
+ %% Any queue directory we've not been asked to recover is considered garbage
+ rabbit_file:recursive_delete(
+ [DirName ||
+ DirName <- all_queue_directory_names(VHost),
+ not sets:is_element(filename:basename(DirName), DurableDirectories)]),
+ rabbit_recovery_terms:clear(VHost),
+
+ %% The backing queue interface requires that the queue recovery terms
+ %% which come back from start/1 are in the same order as DurableQueueNames
+ OrderedTerms = lists:reverse(DurableTerms),
+ {OrderedTerms, {fun queue_index_walker/1, {start, DurableQueueNames}}}.
+
+stop(VHost) -> rabbit_recovery_terms:stop(VHost).
+
+all_queue_directory_names(VHost) ->
+ filelib:wildcard(filename:join([rabbit_vhost:msg_store_dir_path(VHost),
+ "queues", "*"])).
+
+all_queue_directory_names() ->
+ filelib:wildcard(filename:join([rabbit_vhost:msg_store_dir_wildcard(),
+ "queues", "*"])).
+
+%%----------------------------------------------------------------------------
+%% startup and shutdown
+%%----------------------------------------------------------------------------
+
+erase_index_dir(Dir) ->
+ case rabbit_file:is_dir(Dir) of
+ true -> rabbit_file:recursive_delete([Dir]);
+ false -> ok
+ end.
+
+blank_state(VHostDir, QueueName) ->
+ Dir = queue_dir(VHostDir, QueueName),
+ blank_state_name_dir_funs(QueueName,
+ Dir,
+ fun (_) -> ok end,
+ fun (_) -> ok end).
+
+queue_dir(VHostDir, QueueName) ->
+ %% Queue directory is
+ %% {node_database_dir}/msg_stores/vhosts/{vhost}/queues/{queue}
+ QueueDir = queue_name_to_dir_name(QueueName),
+ filename:join([VHostDir, "queues", QueueDir]).
+
+queue_name_to_dir_name(#resource { kind = queue,
+ virtual_host = VHost,
+ name = QName }) ->
+ <<Num:128>> = erlang:md5(<<"queue", VHost/binary, QName/binary>>),
+ rabbit_misc:format("~.36B", [Num]).
+
+queue_name_to_dir_name_legacy(Name = #resource { kind = queue }) ->
+ <<Num:128>> = erlang:md5(term_to_binary_compat:term_to_binary_1(Name)),
+ rabbit_misc:format("~.36B", [Num]).
+
+queues_base_dir() ->
+ rabbit_mnesia:dir().
+
+blank_state_name_dir_funs(Name, Dir, OnSyncFun, OnSyncMsgFun) ->
+ {ok, MaxJournal} =
+ application:get_env(rabbit, queue_index_max_journal_entries),
+ #qistate { dir = Dir,
+ segments = segments_new(),
+ journal_handle = undefined,
+ dirty_count = 0,
+ max_journal_entries = MaxJournal,
+ on_sync = OnSyncFun,
+ on_sync_msg = OnSyncMsgFun,
+ unconfirmed = gb_sets:new(),
+ unconfirmed_msg = gb_sets:new(),
+ pre_publish_cache = [],
+ delivered_cache = [],
+ queue_name = Name }.
+
+init_clean(RecoveredCounts, State) ->
+ %% Load the journal. Since this is a clean recovery this (almost)
+ %% gets us back to where we were on shutdown.
+ State1 = #qistate { dir = Dir, segments = Segments } = load_journal(State),
+ %% The journal loading only creates records for segments touched
+ %% by the journal, and the counts are based on the journal entries
+ %% only. We need *complete* counts for *all* segments. By an
+ %% amazing coincidence we stored that information on shutdown.
+ Segments1 =
+ lists:foldl(
+ fun ({Seg, UnackedCount}, SegmentsN) ->
+ Segment = segment_find_or_new(Seg, Dir, SegmentsN),
+ segment_store(Segment #segment { unacked = UnackedCount },
+ SegmentsN)
+ end, Segments, RecoveredCounts),
+ %% the counts above include transient messages, which would be the
+ %% wrong thing to return
+ {undefined, undefined, State1 # qistate { segments = Segments1 }}.
+
+init_dirty(CleanShutdown, ContainsCheckFun, State) ->
+ %% Recover the journal completely. This will also load segments
+ %% which have entries in the journal and remove duplicates. The
+ %% counts will correctly reflect the combination of the segment
+ %% and the journal.
+ State1 = #qistate { dir = Dir, segments = Segments } =
+ recover_journal(State),
+ {Segments1, Count, Bytes, DirtyCount} =
+ %% Load each segment in turn and filter out messages that are
+ %% not in the msg_store, by adding acks to the journal. These
+ %% acks only go to the RAM journal as it doesn't matter if we
+ %% lose them. Also mark delivered if not clean shutdown. Also
+ %% find the number of unacked messages. Also accumulate the
+ %% dirty count here, so we can call maybe_flush_journal below
+ %% and avoid unnecessary file system operations.
+ lists:foldl(
+ fun (Seg, {Segments2, CountAcc, BytesAcc, DirtyCount}) ->
+ {{Segment = #segment { unacked = UnackedCount }, Dirty},
+ UnackedBytes} =
+ recover_segment(ContainsCheckFun, CleanShutdown,
+ segment_find_or_new(Seg, Dir, Segments2),
+ State1#qistate.max_journal_entries),
+ {segment_store(Segment, Segments2),
+ CountAcc + UnackedCount,
+ BytesAcc + UnackedBytes, DirtyCount + Dirty}
+ end, {Segments, 0, 0, 0}, all_segment_nums(State1)),
+ State2 = maybe_flush_journal(State1 #qistate { segments = Segments1,
+ dirty_count = DirtyCount }),
+ {Count, Bytes, State2}.
+
+terminate(State = #qistate { journal_handle = JournalHdl,
+ segments = Segments }) ->
+ ok = case JournalHdl of
+ undefined -> ok;
+ _ -> file_handle_cache:close(JournalHdl)
+ end,
+ SegmentCounts =
+ segment_fold(
+ fun (#segment { num = Seg, unacked = UnackedCount }, Acc) ->
+ [{Seg, UnackedCount} | Acc]
+ end, [], Segments),
+ {SegmentCounts, State #qistate { journal_handle = undefined,
+ segments = undefined }}.
+
+recover_segment(ContainsCheckFun, CleanShutdown,
+ Segment = #segment { journal_entries = JEntries }, MaxJournal) ->
+ {SegEntries, UnackedCount} = load_segment(false, Segment),
+ {SegEntries1, UnackedCountDelta} =
+ segment_plus_journal(SegEntries, JEntries),
+ array:sparse_foldl(
+ fun (RelSeq, {{IsPersistent, Bin, MsgBin}, Del, no_ack},
+ {SegmentAndDirtyCount, Bytes}) ->
+ {MsgOrId, MsgProps} = parse_pub_record_body(Bin, MsgBin),
+ {recover_message(ContainsCheckFun(MsgOrId), CleanShutdown,
+ Del, RelSeq, SegmentAndDirtyCount, MaxJournal),
+ Bytes + case IsPersistent of
+ true -> MsgProps#message_properties.size;
+ false -> 0
+ end}
+ end,
+ {{Segment #segment { unacked = UnackedCount + UnackedCountDelta }, 0}, 0},
+ SegEntries1).
+
+recover_message( true, true, _Del, _RelSeq, SegmentAndDirtyCount, _MaxJournal) ->
+ SegmentAndDirtyCount;
+recover_message( true, false, del, _RelSeq, SegmentAndDirtyCount, _MaxJournal) ->
+ SegmentAndDirtyCount;
+recover_message( true, false, no_del, RelSeq, {Segment, _DirtyCount}, MaxJournal) ->
+ %% force to flush the segment
+ {add_to_journal(RelSeq, del, Segment), MaxJournal + 1};
+recover_message(false, _, del, RelSeq, {Segment, DirtyCount}, _MaxJournal) ->
+ {add_to_journal(RelSeq, ack, Segment), DirtyCount + 1};
+recover_message(false, _, no_del, RelSeq, {Segment, DirtyCount}, _MaxJournal) ->
+ {add_to_journal(RelSeq, ack,
+ add_to_journal(RelSeq, del, Segment)),
+ DirtyCount + 2}.
+
+%%----------------------------------------------------------------------------
+%% msg store startup delta function
+%%----------------------------------------------------------------------------
+
+queue_index_walker({start, DurableQueues}) when is_list(DurableQueues) ->
+ {ok, Gatherer} = gatherer:start_link(),
+ [begin
+ ok = gatherer:fork(Gatherer),
+ ok = worker_pool:submit_async(
+ fun () -> link(Gatherer),
+ ok = queue_index_walker_reader(QueueName, Gatherer),
+ unlink(Gatherer),
+ ok
+ end)
+ end || QueueName <- DurableQueues],
+ queue_index_walker({next, Gatherer});
+
+queue_index_walker({next, Gatherer}) when is_pid(Gatherer) ->
+ case gatherer:out(Gatherer) of
+ empty ->
+ ok = gatherer:stop(Gatherer),
+ finished;
+ {value, {MsgId, Count}} ->
+ {MsgId, Count, {next, Gatherer}}
+ end.
+
+queue_index_walker_reader(QueueName, Gatherer) ->
+ ok = scan_queue_segments(
+ fun (_SeqId, MsgId, _MsgProps, true, _IsDelivered, no_ack, ok)
+ when is_binary(MsgId) ->
+ gatherer:sync_in(Gatherer, {MsgId, 1});
+ (_SeqId, _MsgId, _MsgProps, _IsPersistent, _IsDelivered,
+ _IsAcked, Acc) ->
+ Acc
+ end, ok, QueueName),
+ ok = gatherer:finish(Gatherer).
+
+scan_queue_segments(Fun, Acc, #resource{ virtual_host = VHost } = QueueName) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ scan_queue_segments(Fun, Acc, VHostDir, QueueName).
+
+scan_queue_segments(Fun, Acc, VHostDir, QueueName) ->
+ State = #qistate { segments = Segments, dir = Dir } =
+ recover_journal(blank_state(VHostDir, QueueName)),
+ Result = lists:foldr(
+ fun (Seg, AccN) ->
+ segment_entries_foldr(
+ fun (RelSeq, {{MsgOrId, MsgProps, IsPersistent},
+ IsDelivered, IsAcked}, AccM) ->
+ Fun(reconstruct_seq_id(Seg, RelSeq), MsgOrId, MsgProps,
+ IsPersistent, IsDelivered, IsAcked, AccM)
+ end, AccN, segment_find_or_new(Seg, Dir, Segments))
+ end, Acc, all_segment_nums(State)),
+ {_SegmentCounts, _State} = terminate(State),
+ Result.
+
+%%----------------------------------------------------------------------------
+%% expiry/binary manipulation
+%%----------------------------------------------------------------------------
+
+create_pub_record_body(MsgOrId, #message_properties { expiry = Expiry,
+ size = Size }) ->
+ ExpiryBin = expiry_to_binary(Expiry),
+ case MsgOrId of
+ MsgId when is_binary(MsgId) ->
+ {<<MsgId/binary, ExpiryBin/binary, Size:?SIZE_BITS>>, <<>>};
+ #basic_message{id = MsgId} ->
+ MsgBin = term_to_binary(MsgOrId),
+ {<<MsgId/binary, ExpiryBin/binary, Size:?SIZE_BITS>>, MsgBin}
+ end.
+
+expiry_to_binary(undefined) -> <<?NO_EXPIRY:?EXPIRY_BITS>>;
+expiry_to_binary(Expiry) -> <<Expiry:?EXPIRY_BITS>>.
+
+parse_pub_record_body(<<MsgIdNum:?MSG_ID_BITS, Expiry:?EXPIRY_BITS,
+ Size:?SIZE_BITS>>, MsgBin) ->
+ %% workaround for binary data fragmentation. See
+ %% rabbit_msg_file:read_next/2
+ <<MsgId:?MSG_ID_BYTES/binary>> = <<MsgIdNum:?MSG_ID_BITS>>,
+ Props = #message_properties{expiry = case Expiry of
+ ?NO_EXPIRY -> undefined;
+ X -> X
+ end,
+ size = Size},
+ case MsgBin of
+ <<>> -> {MsgId, Props};
+ _ -> Msg = #basic_message{id = MsgId} = binary_to_term(MsgBin),
+ {Msg, Props}
+ end.
+
+%%----------------------------------------------------------------------------
+%% journal manipulation
+%%----------------------------------------------------------------------------
+
+add_to_journal(SeqId, Action, State = #qistate { dirty_count = DCount,
+ segments = Segments,
+ dir = Dir }) ->
+ {Seg, RelSeq} = seq_id_to_seg_and_rel_seq_id(SeqId),
+ Segment = segment_find_or_new(Seg, Dir, Segments),
+ Segment1 = add_to_journal(RelSeq, Action, Segment),
+ State #qistate { dirty_count = DCount + 1,
+ segments = segment_store(Segment1, Segments) };
+
+add_to_journal(RelSeq, Action,
+ Segment = #segment { journal_entries = JEntries,
+ entries_to_segment = EToSeg,
+ unacked = UnackedCount }) ->
+
+ {Fun, Entry} = action_to_entry(RelSeq, Action, JEntries),
+
+ {JEntries1, EToSeg1} =
+ case Fun of
+ set ->
+ {array:set(RelSeq, Entry, JEntries),
+ array:set(RelSeq, entry_to_segment(RelSeq, Entry, []),
+ EToSeg)};
+ reset ->
+ {array:reset(RelSeq, JEntries),
+ array:reset(RelSeq, EToSeg)}
+ end,
+
+ Segment #segment {
+ journal_entries = JEntries1,
+ entries_to_segment = EToSeg1,
+ unacked = UnackedCount + case Action of
+ ?PUB -> +1;
+ del -> 0;
+ ack -> -1
+ end}.
+
+action_to_entry(RelSeq, Action, JEntries) ->
+ case array:get(RelSeq, JEntries) of
+ undefined ->
+ {set,
+ case Action of
+ ?PUB -> {Action, no_del, no_ack};
+ del -> {no_pub, del, no_ack};
+ ack -> {no_pub, no_del, ack}
+ end};
+ ({Pub, no_del, no_ack}) when Action == del ->
+ {set, {Pub, del, no_ack}};
+ ({no_pub, del, no_ack}) when Action == ack ->
+ {set, {no_pub, del, ack}};
+ ({?PUB, del, no_ack}) when Action == ack ->
+ {reset, none}
+ end.
+
+maybe_flush_journal(State) ->
+ maybe_flush_journal(infinity, State).
+
+maybe_flush_journal(Hint, State = #qistate { dirty_count = DCount,
+ max_journal_entries = MaxJournal })
+ when DCount > MaxJournal orelse (Hint =/= infinity andalso DCount > Hint) ->
+ flush_journal(State);
+maybe_flush_journal(_Hint, State) ->
+ State.
+
+flush_journal(State = #qistate { segments = Segments }) ->
+ Segments1 =
+ segment_fold(
+ fun (#segment { unacked = 0, path = Path }, SegmentsN) ->
+ case rabbit_file:is_file(Path) of
+ true -> ok = rabbit_file:delete(Path);
+ false -> ok
+ end,
+ SegmentsN;
+ (#segment {} = Segment, SegmentsN) ->
+ segment_store(append_journal_to_segment(Segment), SegmentsN)
+ end, segments_new(), Segments),
+ {JournalHdl, State1} =
+ get_journal_handle(State #qistate { segments = Segments1 }),
+ ok = file_handle_cache:clear(JournalHdl),
+ notify_sync(State1 #qistate { dirty_count = 0 }).
+
+append_journal_to_segment(#segment { journal_entries = JEntries,
+ entries_to_segment = EToSeg,
+ path = Path } = Segment) ->
+ case array:sparse_size(JEntries) of
+ 0 -> Segment;
+ _ ->
+ file_handle_cache_stats:update(queue_index_write),
+
+ {ok, Hdl} = file_handle_cache:open_with_absolute_path(
+ Path, ?WRITE_MODE,
+ [{write_buffer, infinity}]),
+ %% the file_handle_cache also does a list reverse, so this might
+ %% not be required here, but since we previously used a
+ %% sparse_foldr, keeping the lists:reverse/1 seems the safest
+ %% thing to do for now.
+ file_handle_cache:append(Hdl, lists:reverse(array:to_list(EToSeg))),
+ ok = file_handle_cache:close(Hdl),
+ Segment #segment { journal_entries = array_new(),
+ entries_to_segment = array_new([]) }
+ end.
+
+get_journal_handle(State = #qistate { journal_handle = undefined,
+ dir = Dir,
+ queue_name = Name }) ->
+ Path = filename:join(Dir, ?JOURNAL_FILENAME),
+ ok = rabbit_file:ensure_dir(Path),
+ ok = ensure_queue_name_stub_file(Dir, Name),
+ {ok, Hdl} = file_handle_cache:open_with_absolute_path(
+ Path, ?WRITE_MODE, [{write_buffer, infinity}]),
+ {Hdl, State #qistate { journal_handle = Hdl }};
+get_journal_handle(State = #qistate { journal_handle = Hdl }) ->
+ {Hdl, State}.
+
+%% Loading Journal. This isn't idempotent and will mess up the counts
+%% if you call it more than once on the same state. Assumes the counts
+%% are 0 to start with.
+load_journal(State = #qistate { dir = Dir }) ->
+ Path = filename:join(Dir, ?JOURNAL_FILENAME),
+ case rabbit_file:is_file(Path) of
+ true -> {JournalHdl, State1} = get_journal_handle(State),
+ Size = rabbit_file:file_size(Path),
+ {ok, 0} = file_handle_cache:position(JournalHdl, 0),
+ {ok, JournalBin} = file_handle_cache:read(JournalHdl, Size),
+ parse_journal_entries(JournalBin, State1);
+ false -> State
+ end.
+
+%% ditto
+recover_journal(State) ->
+ State1 = #qistate { segments = Segments } = load_journal(State),
+ Segments1 =
+ segment_map(
+ fun (Segment = #segment { journal_entries = JEntries,
+ entries_to_segment = EToSeg,
+ unacked = UnackedCountInJournal }) ->
+ %% We want to keep ack'd entries in so that we can
+ %% remove them if duplicates are in the journal. The
+ %% counts here are purely from the segment itself.
+ {SegEntries, UnackedCountInSeg} = load_segment(true, Segment),
+ {JEntries1, EToSeg1, UnackedCountDuplicates} =
+ journal_minus_segment(JEntries, EToSeg, SegEntries),
+ Segment #segment { journal_entries = JEntries1,
+ entries_to_segment = EToSeg1,
+ unacked = (UnackedCountInJournal +
+ UnackedCountInSeg -
+ UnackedCountDuplicates) }
+ end, Segments),
+ State1 #qistate { segments = Segments1 }.
+
+parse_journal_entries(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>, State) ->
+ parse_journal_entries(Rest, add_to_journal(SeqId, del, State));
+
+parse_journal_entries(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>, State) ->
+ parse_journal_entries(Rest, add_to_journal(SeqId, ack, State));
+parse_journal_entries(<<0:?JPREFIX_BITS, 0:?SEQ_BITS,
+ 0:?PUB_RECORD_SIZE_BYTES/unit:8, _/binary>>, State) ->
+ %% Journal entry composed only of zeroes was probably
+ %% produced during a dirty shutdown so stop reading
+ State;
+parse_journal_entries(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Bin:?PUB_RECORD_BODY_BYTES/binary,
+ MsgSize:?EMBEDDED_SIZE_BITS, MsgBin:MsgSize/binary,
+ Rest/binary>>, State) ->
+ IsPersistent = case Prefix of
+ ?PUB_PERSIST_JPREFIX -> true;
+ ?PUB_TRANS_JPREFIX -> false
+ end,
+ parse_journal_entries(
+ Rest, add_to_journal(SeqId, {IsPersistent, Bin, MsgBin}, State));
+parse_journal_entries(_ErrOrEoF, State) ->
+ State.
+
+deliver_or_ack(_Kind, [], State) ->
+ State;
+deliver_or_ack(Kind, SeqIds, State) ->
+ JPrefix = case Kind of ack -> ?ACK_JPREFIX; del -> ?DEL_JPREFIX end,
+ {JournalHdl, State1} = get_journal_handle(State),
+ file_handle_cache_stats:update(queue_index_journal_write),
+ ok = file_handle_cache:append(
+ JournalHdl,
+ [<<JPrefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>> || SeqId <- SeqIds]),
+ maybe_flush_journal(lists:foldl(fun (SeqId, StateN) ->
+ add_to_journal(SeqId, Kind, StateN)
+ end, State1, SeqIds)).
+
+notify_sync(State = #qistate{unconfirmed = UC,
+ unconfirmed_msg = UCM,
+ on_sync = OnSyncFun,
+ on_sync_msg = OnSyncMsgFun}) ->
+ State1 = case gb_sets:is_empty(UC) of
+ true -> State;
+ false -> OnSyncFun(UC),
+ State#qistate{unconfirmed = gb_sets:new()}
+ end,
+ case gb_sets:is_empty(UCM) of
+ true -> State1;
+ false -> OnSyncMsgFun(UCM),
+ State1#qistate{unconfirmed_msg = gb_sets:new()}
+ end.
+
+%%----------------------------------------------------------------------------
+%% segment manipulation
+%%----------------------------------------------------------------------------
+
+seq_id_to_seg_and_rel_seq_id(SeqId) ->
+ { SeqId div ?SEGMENT_ENTRY_COUNT, SeqId rem ?SEGMENT_ENTRY_COUNT }.
+
+reconstruct_seq_id(Seg, RelSeq) ->
+ (Seg * ?SEGMENT_ENTRY_COUNT) + RelSeq.
+
+all_segment_nums(#qistate { dir = Dir, segments = Segments }) ->
+ lists:sort(
+ sets:to_list(
+ lists:foldl(
+ fun (SegName, Set) ->
+ sets:add_element(
+ list_to_integer(
+ lists:takewhile(fun (C) -> $0 =< C andalso C =< $9 end,
+ SegName)), Set)
+ end, sets:from_list(segment_nums(Segments)),
+ rabbit_file:wildcard(".*\\" ++ ?SEGMENT_EXTENSION, Dir)))).
+
+segment_find_or_new(Seg, Dir, Segments) ->
+ case segment_find(Seg, Segments) of
+ {ok, Segment} -> Segment;
+ error -> SegName = integer_to_list(Seg) ++ ?SEGMENT_EXTENSION,
+ Path = filename:join(Dir, SegName),
+ #segment { num = Seg,
+ path = Path,
+ journal_entries = array_new(),
+ entries_to_segment = array_new([]),
+ unacked = 0 }
+ end.
+
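+%% Segments are held as {Map, MostRecentlyUsed}, where MostRecentlyUsed
+%% caches at most the two segments touched most recently so that the
+%% common case avoids a map lookup. The "1", "2, matches head" and
+%% "2, matches tail" comments on the clauses below refer to the current
+%% length of that cache (a sketch of the invariant, inferred from the
+%% code).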
+segment_find(Seg, {_Segments, [Segment = #segment { num = Seg } |_]}) ->
+ {ok, Segment}; %% 1 or (2, matches head)
+segment_find(Seg, {_Segments, [_, Segment = #segment { num = Seg }]}) ->
+ {ok, Segment}; %% 2, matches tail
+segment_find(Seg, {Segments, _}) -> %% no match
+ maps:find(Seg, Segments).
+
+segment_store(Segment = #segment { num = Seg }, %% 1 or (2, matches head)
+ {Segments, [#segment { num = Seg } | Tail]}) ->
+ {Segments, [Segment | Tail]};
+segment_store(Segment = #segment { num = Seg }, %% 2, matches tail
+ {Segments, [SegmentA, #segment { num = Seg }]}) ->
+ {Segments, [Segment, SegmentA]};
+segment_store(Segment = #segment { num = Seg }, {Segments, []}) ->
+ {maps:remove(Seg, Segments), [Segment]};
+segment_store(Segment = #segment { num = Seg }, {Segments, [SegmentA]}) ->
+ {maps:remove(Seg, Segments), [Segment, SegmentA]};
+segment_store(Segment = #segment { num = Seg },
+ {Segments, [SegmentA, SegmentB]}) ->
+ {maps:put(SegmentB#segment.num, SegmentB, maps:remove(Seg, Segments)),
+ [Segment, SegmentA]}.
+
+segment_fold(Fun, Acc, {Segments, CachedSegments}) ->
+ maps:fold(fun (_Seg, Segment, Acc1) -> Fun(Segment, Acc1) end,
+ lists:foldl(Fun, Acc, CachedSegments), Segments).
+
+segment_map(Fun, {Segments, CachedSegments}) ->
+ {maps:map(fun (_Seg, Segment) -> Fun(Segment) end, Segments),
+ lists:map(Fun, CachedSegments)}.
+
+segment_nums({Segments, CachedSegments}) ->
+ lists:map(fun (#segment { num = Num }) -> Num end, CachedSegments) ++
+ maps:keys(Segments).
+
+segments_new() ->
+ {#{}, []}.
+
+entry_to_segment(_RelSeq, {?PUB, del, ack}, Initial) ->
+ Initial;
+entry_to_segment(RelSeq, {Pub, Del, Ack}, Initial) ->
+ %% NB: we are assembling the segment in reverse order here, so
+ %% del/ack comes first.
+ Buf1 = case {Del, Ack} of
+ {no_del, no_ack} ->
+ Initial;
+ _ ->
+ Binary = <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS>>,
+ case {Del, Ack} of
+ {del, ack} -> [[Binary, Binary] | Initial];
+ _ -> [Binary | Initial]
+ end
+ end,
+ case Pub of
+ no_pub ->
+ Buf1;
+ {IsPersistent, Bin, MsgBin} ->
+ [[<<?PUB_PREFIX:?PUB_PREFIX_BITS,
+ (bool_to_int(IsPersistent)):1,
+ RelSeq:?REL_SEQ_BITS, Bin/binary,
+ (size(MsgBin)):?EMBEDDED_SIZE_BITS>>, MsgBin] | Buf1]
+ end.
+
+read_bounded_segment(Seg, {StartSeg, StartRelSeq}, {EndSeg, EndRelSeq},
+ {Messages, Segments}, Dir) ->
+ Segment = segment_find_or_new(Seg, Dir, Segments),
+ {segment_entries_foldr(
+ fun (RelSeq, {{MsgOrId, MsgProps, IsPersistent}, IsDelivered, no_ack},
+ Acc)
+ when (Seg > StartSeg orelse StartRelSeq =< RelSeq) andalso
+ (Seg < EndSeg orelse EndRelSeq >= RelSeq) ->
+ [{MsgOrId, reconstruct_seq_id(StartSeg, RelSeq), MsgProps,
+ IsPersistent, IsDelivered == del} | Acc];
+ (_RelSeq, _Value, Acc) ->
+ Acc
+ end, Messages, Segment),
+ segment_store(Segment, Segments)}.
+
+segment_entries_foldr(Fun, Init,
+ Segment = #segment { journal_entries = JEntries }) ->
+ {SegEntries, _UnackedCount} = load_segment(false, Segment),
+ {SegEntries1, _UnackedCountD} = segment_plus_journal(SegEntries, JEntries),
+ array:sparse_foldr(
+ fun (RelSeq, {{IsPersistent, Bin, MsgBin}, Del, Ack}, Acc) ->
+ {MsgOrId, MsgProps} = parse_pub_record_body(Bin, MsgBin),
+ Fun(RelSeq, {{MsgOrId, MsgProps, IsPersistent}, Del, Ack}, Acc)
+ end, Init, SegEntries1).
+
+%% Loading segments
+%%
+%% Does not do any combining with the journal at all.
+load_segment(KeepAcked, #segment { path = Path }) ->
+ Empty = {array_new(), 0},
+ case rabbit_file:is_file(Path) of
+ false -> Empty;
+ true -> Size = rabbit_file:file_size(Path),
+ file_handle_cache_stats:update(queue_index_read),
+ {ok, Hdl} = file_handle_cache:open_with_absolute_path(
+ Path, ?READ_MODE, []),
+ {ok, 0} = file_handle_cache:position(Hdl, bof),
+ {ok, SegBin} = file_handle_cache:read(Hdl, Size),
+ ok = file_handle_cache:close(Hdl),
+ Res = parse_segment_entries(SegBin, KeepAcked, Empty),
+ Res
+ end.
+
+parse_segment_entries(<<?PUB_PREFIX:?PUB_PREFIX_BITS,
+ IsPersistNum:1, RelSeq:?REL_SEQ_BITS, Rest/binary>>,
+ KeepAcked, Acc) ->
+ parse_segment_publish_entry(
+ Rest, 1 == IsPersistNum, RelSeq, KeepAcked, Acc);
+parse_segment_entries(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>, KeepAcked, Acc) ->
+ parse_segment_entries(
+ Rest, KeepAcked, add_segment_relseq_entry(KeepAcked, RelSeq, Acc));
+parse_segment_entries(<<>>, _KeepAcked, Acc) ->
+ Acc.
+
+parse_segment_publish_entry(<<Bin:?PUB_RECORD_BODY_BYTES/binary,
+ MsgSize:?EMBEDDED_SIZE_BITS,
+ MsgBin:MsgSize/binary, Rest/binary>>,
+ IsPersistent, RelSeq, KeepAcked,
+ {SegEntries, Unacked}) ->
+ Obj = {{IsPersistent, Bin, MsgBin}, no_del, no_ack},
+ SegEntries1 = array:set(RelSeq, Obj, SegEntries),
+ parse_segment_entries(Rest, KeepAcked, {SegEntries1, Unacked + 1});
+parse_segment_publish_entry(Rest, _IsPersistent, _RelSeq, KeepAcked, Acc) ->
+ parse_segment_entries(Rest, KeepAcked, Acc).
+
+add_segment_relseq_entry(KeepAcked, RelSeq, {SegEntries, Unacked}) ->
+ case array:get(RelSeq, SegEntries) of
+ {Pub, no_del, no_ack} ->
+ {array:set(RelSeq, {Pub, del, no_ack}, SegEntries), Unacked};
+ {Pub, del, no_ack} when KeepAcked ->
+ {array:set(RelSeq, {Pub, del, ack}, SegEntries), Unacked - 1};
+ {_Pub, del, no_ack} ->
+ {array:reset(RelSeq, SegEntries), Unacked - 1}
+ end.
+
+array_new() ->
+ array_new(undefined).
+
+array_new(Default) ->
+ array:new([{default, Default}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]).
+
+bool_to_int(true ) -> 1;
+bool_to_int(false) -> 0.
+
+%%----------------------------------------------------------------------------
+%% journal & segment combination
+%%----------------------------------------------------------------------------
+
+%% Combine what we have just read from a segment file with what we're
+%% holding for that segment in memory. There must be no duplicates.
+segment_plus_journal(SegEntries, JEntries) ->
+ array:sparse_foldl(
+ fun (RelSeq, JObj, {SegEntriesOut, AdditionalUnacked}) ->
+ SegEntry = array:get(RelSeq, SegEntriesOut),
+ {Obj, AdditionalUnackedDelta} =
+ segment_plus_journal1(SegEntry, JObj),
+ {case Obj of
+ undefined -> array:reset(RelSeq, SegEntriesOut);
+ _ -> array:set(RelSeq, Obj, SegEntriesOut)
+ end,
+ AdditionalUnacked + AdditionalUnackedDelta}
+ end, {SegEntries, 0}, JEntries).
+
+%% Here, the result is a tuple whose first element is the entry to
+%% store in the segment array: an entry to add (for items only in the
+%% journal), a modified entry (bits in both), or 'undefined', meaning
+%% the entry should be erased (ack in journal, not in segment). The
+%% other element of the tuple is the delta for AdditionalUnacked.
+segment_plus_journal1(undefined, {?PUB, no_del, no_ack} = Obj) ->
+ {Obj, 1};
+segment_plus_journal1(undefined, {?PUB, del, no_ack} = Obj) ->
+ {Obj, 1};
+segment_plus_journal1(undefined, {?PUB, del, ack}) ->
+ {undefined, 0};
+
+segment_plus_journal1({?PUB = Pub, no_del, no_ack}, {no_pub, del, no_ack}) ->
+ {{Pub, del, no_ack}, 0};
+segment_plus_journal1({?PUB, no_del, no_ack}, {no_pub, del, ack}) ->
+ {undefined, -1};
+segment_plus_journal1({?PUB, del, no_ack}, {no_pub, no_del, ack}) ->
+ {undefined, -1}.
+
+%% Remove from the journal entries for a segment, items that are
+%% duplicates of entries found in the segment itself. Used on start up
+%% to clean up the journal.
+%%
+%% We need to update entries_to_segment as well, since it is just a
+%% cache of what is in the journal.
+journal_minus_segment(JEntries, EToSeg, SegEntries) ->
+ array:sparse_foldl(
+ fun (RelSeq, JObj, {JEntriesOut, EToSegOut, UnackedRemoved}) ->
+ SegEntry = array:get(RelSeq, SegEntries),
+ {Obj, UnackedRemovedDelta} =
+ journal_minus_segment1(JObj, SegEntry),
+ {JEntriesOut1, EToSegOut1} =
+ case Obj of
+ keep ->
+ {JEntriesOut, EToSegOut};
+ undefined ->
+ {array:reset(RelSeq, JEntriesOut),
+ array:reset(RelSeq, EToSegOut)};
+ _ ->
+ {array:set(RelSeq, Obj, JEntriesOut),
+ array:set(RelSeq, entry_to_segment(RelSeq, Obj, []),
+ EToSegOut)}
+ end,
+ {JEntriesOut1, EToSegOut1, UnackedRemoved + UnackedRemovedDelta}
+ end, {JEntries, EToSeg, 0}, JEntries).
+
+%% Here, the result is a tuple with the first element containing the
+%% item we are adding to or modifying in the (initially fresh) journal
+%% array. If the item is 'undefined' we leave the journal array
+%% alone. The other element of the tuple is the delta for
+%% UnackedRemoved.
+
+%% Both the same. Must be at least the publish
+journal_minus_segment1({?PUB, _Del, no_ack} = Obj, Obj) ->
+ {undefined, 1};
+journal_minus_segment1({?PUB, _Del, ack} = Obj, Obj) ->
+ {undefined, 0};
+
+%% Just publish in journal
+journal_minus_segment1({?PUB, no_del, no_ack}, undefined) ->
+ {keep, 0};
+
+%% Publish and deliver in journal
+journal_minus_segment1({?PUB, del, no_ack}, undefined) ->
+ {keep, 0};
+journal_minus_segment1({?PUB = Pub, del, no_ack}, {Pub, no_del, no_ack}) ->
+ {{no_pub, del, no_ack}, 1};
+
+%% Publish, deliver and ack in journal
+journal_minus_segment1({?PUB, del, ack}, undefined) ->
+ {keep, 0};
+journal_minus_segment1({?PUB = Pub, del, ack}, {Pub, no_del, no_ack}) ->
+ {{no_pub, del, ack}, 1};
+journal_minus_segment1({?PUB = Pub, del, ack}, {Pub, del, no_ack}) ->
+ {{no_pub, no_del, ack}, 1};
+
+%% Just deliver in journal
+journal_minus_segment1({no_pub, del, no_ack}, {?PUB, no_del, no_ack}) ->
+ {keep, 0};
+journal_minus_segment1({no_pub, del, no_ack}, {?PUB, del, no_ack}) ->
+ {undefined, 0};
+
+%% Just ack in journal
+journal_minus_segment1({no_pub, no_del, ack}, {?PUB, del, no_ack}) ->
+ {keep, 0};
+journal_minus_segment1({no_pub, no_del, ack}, {?PUB, del, ack}) ->
+ {undefined, -1};
+
+%% Deliver and ack in journal
+journal_minus_segment1({no_pub, del, ack}, {?PUB, no_del, no_ack}) ->
+ {keep, 0};
+journal_minus_segment1({no_pub, del, ack}, {?PUB, del, no_ack}) ->
+ {{no_pub, no_del, ack}, 0};
+journal_minus_segment1({no_pub, del, ack}, {?PUB, del, ack}) ->
+ {undefined, -1};
+
+%% Missing segment. If flush_journal/1 is interrupted after deleting
+%% the segment but before truncating the journal we can get these
+%% cases: a delivery and an acknowledgement in the journal, or just an
+%% acknowledgement in the journal, but with no segment. In both cases
+%% we have really forgotten the message; so ignore what's in the
+%% journal.
+journal_minus_segment1({no_pub, no_del, ack}, undefined) ->
+ {undefined, 0};
+journal_minus_segment1({no_pub, del, ack}, undefined) ->
+ {undefined, 0}.
+
+%%----------------------------------------------------------------------------
+%% upgrade
+%%----------------------------------------------------------------------------
+
+-spec add_queue_ttl() -> 'ok'.
+
+add_queue_ttl() ->
+ foreach_queue_index({fun add_queue_ttl_journal/1,
+ fun add_queue_ttl_segment/1}).
+
+add_queue_ttl_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+add_queue_ttl_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+add_queue_ttl_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ MsgId:?MSG_ID_BYTES/binary, Rest/binary>>) ->
+ {[<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, MsgId,
+ expiry_to_binary(undefined)], Rest};
+add_queue_ttl_journal(_) ->
+ stop.
+
+add_queue_ttl_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+ RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BYTES/binary,
+ Rest/binary>>) ->
+ {[<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS>>,
+ MsgId, expiry_to_binary(undefined)], Rest};
+add_queue_ttl_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+ {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+ Rest};
+add_queue_ttl_segment(_) ->
+ stop.
+
+avoid_zeroes() ->
+ foreach_queue_index({none, fun avoid_zeroes_segment/1}).
+
+avoid_zeroes_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+ RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, Rest/binary>>) ->
+ {<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS>>, Rest};
+avoid_zeroes_segment(<<0:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+ {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+ Rest};
+avoid_zeroes_segment(_) ->
+ stop.
+
+%% At upgrade time we just define every message's size as 0 - that
+%% will save us a load of faff with the message store, and means we
+%% can actually use the clean recovery terms in VQ. It does mean we
+%% don't count message bodies from before the migration, but we can
+%% live with that.
+store_msg_size() ->
+ foreach_queue_index({fun store_msg_size_journal/1,
+ fun store_msg_size_segment/1}).
+
+store_msg_size_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_size_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_size_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS,
+ Rest/binary>>) ->
+ {<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, 0:?SIZE_BITS>>, Rest};
+store_msg_size_journal(_) ->
+ stop.
+
+store_msg_size_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+ RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, Rest/binary>>) ->
+ {<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS, 0:?SIZE_BITS>>, Rest};
+store_msg_size_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+ {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+ Rest};
+store_msg_size_segment(_) ->
+ stop.
+
+store_msg() ->
+ foreach_queue_index({fun store_msg_journal/1,
+ fun store_msg_segment/1}).
+
+store_msg_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS, Size:?SIZE_BITS,
+ Rest/binary>>) ->
+ {<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, Size:?SIZE_BITS,
+ 0:?EMBEDDED_SIZE_BITS>>, Rest};
+store_msg_journal(_) ->
+ stop.
+
+store_msg_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+ RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, Size:?SIZE_BITS, Rest/binary>>) ->
+ {<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS, Size:?SIZE_BITS,
+ 0:?EMBEDDED_SIZE_BITS>>, Rest};
+store_msg_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+ {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+ Rest};
+store_msg_segment(_) ->
+ stop.
+
+
+%%----------------------------------------------------------------------------
+%% Migration functions
+%%----------------------------------------------------------------------------
+
+foreach_queue_index(Funs) ->
+ QueueDirNames = all_queue_directory_names(),
+ {ok, Gatherer} = gatherer:start_link(),
+ [begin
+ ok = gatherer:fork(Gatherer),
+ ok = worker_pool:submit_async(
+ fun () ->
+ transform_queue(QueueDirName, Gatherer, Funs)
+ end)
+ end || QueueDirName <- QueueDirNames],
+ empty = gatherer:out(Gatherer),
+ ok = gatherer:stop(Gatherer).
+
+transform_queue(Dir, Gatherer, {JournalFun, SegmentFun}) ->
+ ok = transform_file(filename:join(Dir, ?JOURNAL_FILENAME), JournalFun),
+ [ok = transform_file(filename:join(Dir, Seg), SegmentFun)
+ || Seg <- rabbit_file:wildcard(".*\\" ++ ?SEGMENT_EXTENSION, Dir)],
+ ok = gatherer:finish(Gatherer).
+
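+%% The per-file transform Fun takes the remaining binary and returns
+%% either {OutputIoData, RestBinary}, in which case OutputIoData is
+%% appended to the rewritten file and scanning continues with
+%% RestBinary, or 'stop' once nothing more can be parsed (see
+%% drive_transform_fun/3 below).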
+transform_file(_Path, none) ->
+ ok;
+transform_file(Path, Fun) when is_function(Fun)->
+ PathTmp = Path ++ ".upgrade",
+ case rabbit_file:file_size(Path) of
+ 0 -> ok;
+ Size -> {ok, PathTmpHdl} =
+ file_handle_cache:open_with_absolute_path(
+ PathTmp, ?WRITE_MODE,
+ [{write_buffer, infinity}]),
+
+ {ok, PathHdl} = file_handle_cache:open_with_absolute_path(
+ Path, ?READ_MODE, [{read_buffer, Size}]),
+ {ok, Content} = file_handle_cache:read(PathHdl, Size),
+ ok = file_handle_cache:close(PathHdl),
+
+ ok = drive_transform_fun(Fun, PathTmpHdl, Content),
+
+ ok = file_handle_cache:close(PathTmpHdl),
+ ok = rabbit_file:rename(PathTmp, Path)
+ end.
+
+drive_transform_fun(Fun, Hdl, Contents) ->
+ case Fun(Contents) of
+ stop -> ok;
+ {Output, Contents1} -> ok = file_handle_cache:append(Hdl, Output),
+ drive_transform_fun(Fun, Hdl, Contents1)
+ end.
+
+move_to_per_vhost_stores(#resource{virtual_host = VHost} = QueueName) ->
+ OldQueueDir = filename:join([queues_base_dir(), "queues",
+ queue_name_to_dir_name_legacy(QueueName)]),
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ NewQueueDir = queue_dir(VHostDir, QueueName),
+ rabbit_log_upgrade:info("About to migrate queue directory '~s' to '~s'",
+ [OldQueueDir, NewQueueDir]),
+ case rabbit_file:is_dir(OldQueueDir) of
+ true ->
+ ok = rabbit_file:ensure_dir(NewQueueDir),
+ ok = rabbit_file:rename(OldQueueDir, NewQueueDir),
+ ok = ensure_queue_name_stub_file(NewQueueDir, QueueName);
+ false ->
+ Msg = "Queue index directory '~s' not found for ~s~n",
+ Args = [OldQueueDir, rabbit_misc:rs(QueueName)],
+ rabbit_log_upgrade:error(Msg, Args),
+ rabbit_log:error(Msg, Args)
+ end,
+ ok.
+
+ensure_queue_name_stub_file(Dir, #resource{virtual_host = VHost, name = QName}) ->
+ QueueNameFile = filename:join(Dir, ?QUEUE_NAME_STUB_FILE),
+ file:write_file(QueueNameFile, <<"VHOST: ", VHost/binary, "\n",
+ "QUEUE: ", QName/binary, "\n">>).
+
+read_global_recovery_terms(DurableQueueNames) ->
+ ok = rabbit_recovery_terms:open_global_table(),
+
+ DurableTerms =
+ lists:foldl(
+ fun(QName, RecoveryTerms) ->
+ DirName = queue_name_to_dir_name_legacy(QName),
+ RecoveryInfo = case rabbit_recovery_terms:read_global(DirName) of
+ {error, _} -> non_clean_shutdown;
+ {ok, Terms} -> Terms
+ end,
+ [RecoveryInfo | RecoveryTerms]
+ end, [], DurableQueueNames),
+
+ ok = rabbit_recovery_terms:close_global_table(),
+ %% The backing queue interface requires that the queue recovery terms
+ %% which come back from start/1 are in the same order as DurableQueueNames
+ OrderedTerms = lists:reverse(DurableTerms),
+ {OrderedTerms, {fun queue_index_walker/1, {start, DurableQueueNames}}}.
+
+cleanup_global_recovery_terms() ->
+ rabbit_file:recursive_delete([filename:join([queues_base_dir(), "queues"])]),
+ rabbit_recovery_terms:delete_global_table(),
+ ok.
+
+
+update_recovery_term(#resource{virtual_host = VHost} = QueueName, Term) ->
+ Key = queue_name_to_dir_name(QueueName),
+ rabbit_recovery_terms:store(VHost, Key, Term).
diff --git a/deps/rabbit/src/rabbit_queue_location_client_local.erl b/deps/rabbit/src/rabbit_queue_location_client_local.erl
new file mode 100644
index 0000000000..2df1608534
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_location_client_local.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_location_client_local).
+-behaviour(rabbit_queue_master_locator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([description/0, queue_master_location/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "locate queue master client local"},
+ {mfa, {rabbit_registry, register,
+ [queue_master_locator,
+ <<"client-local">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+
+%%---------------------------------------------------------------------------
+%% Queue Master Location Callbacks
+%%---------------------------------------------------------------------------
+
+description() ->
+ [{description, <<"Locate queue master node as the client local node">>}].
+
+queue_master_location(Q) when ?is_amqqueue(Q) ->
+ %% unlike with other locator strategies we do not check node maintenance
+ %% status for two reasons:
+ %%
+ %% * nodes in maintenance mode will drop their client connections
+ %% * with other strategies, if no nodes are available, the current node
+ %% is returned but this strategy already does just that
+ {ok, node()}.
diff --git a/deps/rabbit/src/rabbit_queue_location_min_masters.erl b/deps/rabbit/src/rabbit_queue_location_min_masters.erl
new file mode 100644
index 0000000000..6535f082fe
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_location_min_masters.erl
@@ -0,0 +1,70 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_location_min_masters).
+-behaviour(rabbit_queue_master_locator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([description/0, queue_master_location/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "locate queue master min bound queues"},
+ {mfa, {rabbit_registry, register,
+ [queue_master_locator,
+ <<"min-masters">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%%---------------------------------------------------------------------------
+%% Queue Master Location Callbacks
+%%---------------------------------------------------------------------------
+
+description() ->
+ [{description,
+ <<"Locate queue master node from cluster node with least bound queues">>}].
+
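+%% Count how many queue masters each candidate cluster node currently hosts,
+%% drop nodes that are being drained, then pick the node with the fewest
+%% masters.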
+queue_master_location(Q) when ?is_amqqueue(Q) ->
+ Cluster = rabbit_queue_master_location_misc:all_nodes(Q),
+ QueueNames = rabbit_amqqueue:list_names(),
+ MastersPerNode0 = lists:foldl(
+ fun(#resource{virtual_host = VHost, name = QueueName}, NodeMasters) ->
+ case rabbit_queue_master_location_misc:lookup_master(QueueName, VHost) of
+ {ok, Master} when is_atom(Master) ->
+ case maps:is_key(Master, NodeMasters) of
+ true -> maps:update_with(Master,
+ fun(N) -> N + 1 end,
+ NodeMasters);
+ false -> NodeMasters
+ end;
+ _ -> NodeMasters
+ end
+ end,
+ maps:from_list([{N, 0} || N <- Cluster]),
+ QueueNames),
+
+ MastersPerNode = maps:filter(fun (Node, _N) ->
+ not rabbit_maintenance:is_being_drained_local_read(Node)
+ end, MastersPerNode0),
+
+ case map_size(MastersPerNode) > 0 of
+ true ->
+ {MinNode, _NMasters} = maps:fold(
+ fun(Node, NMasters, init) ->
+ {Node, NMasters};
+ (Node, NMasters, {MinNode, MinMasters}) ->
+ case NMasters < MinMasters of
+ true -> {Node, NMasters};
+ false -> {MinNode, MinMasters}
+ end
+ end,
+ init, MastersPerNode),
+ {ok, MinNode};
+ false ->
+ undefined
+ end.
diff --git a/deps/rabbit/src/rabbit_queue_location_random.erl b/deps/rabbit/src/rabbit_queue_location_random.erl
new file mode 100644
index 0000000000..7232fc6703
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_location_random.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_location_random).
+-behaviour(rabbit_queue_master_locator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([description/0, queue_master_location/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "locate queue master random"},
+ {mfa, {rabbit_registry, register,
+ [queue_master_locator,
+ <<"random">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%%---------------------------------------------------------------------------
+%% Queue Master Location Callbacks
+%%---------------------------------------------------------------------------
+
+description() ->
+ [{description,
+ <<"Locate queue master node from cluster in a random manner">>}].
+
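+%% Pick a pseudo-random node from the candidate cluster nodes, excluding any
+%% nodes that are currently being drained for maintenance.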
+queue_master_location(Q) when ?is_amqqueue(Q) ->
+ Cluster0 = rabbit_queue_master_location_misc:all_nodes(Q),
+ Cluster = rabbit_maintenance:filter_out_drained_nodes_local_read(Cluster0),
+ case Cluster of
+ [] ->
+ undefined;
+ Candidates when is_list(Candidates) ->
+ RandomPos = erlang:phash2(erlang:monotonic_time(), length(Candidates)),
+ MasterNode = lists:nth(RandomPos + 1, Candidates),
+ {ok, MasterNode}
+ end.
diff --git a/deps/rabbit/src/rabbit_queue_location_validator.erl b/deps/rabbit/src/rabbit_queue_location_validator.erl
new file mode 100644
index 0000000000..bf41be622c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_location_validator.erl
@@ -0,0 +1,67 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_location_validator).
+-behaviour(rabbit_policy_validator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([validate_policy/1, validate_strategy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "Queue location policy validation"},
+ {mfa, {rabbit_registry, register,
+ [policy_validator,
+ <<"queue-master-locator">>,
+ ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+validate_policy(KeyList) ->
+    case proplists:lookup(<<"queue-master-locator">>, KeyList) of
+ {_, Strategy} -> case validate_strategy(Strategy) of
+ {error, _, _} = Er -> Er;
+ _ -> ok
+ end;
+ _ -> {error, "queue-master-locator undefined"}
+ end.
+
+validate_strategy(Strategy) ->
+ case module(Strategy) of
+ R = {ok, _M} -> R;
+ _ ->
+ {error, "~p invalid queue-master-locator value", [Strategy]}
+ end.
+
+policy(Policy, Q) ->
+ case rabbit_policy:get(Policy, Q) of
+ undefined -> none;
+ P -> P
+ end.
+
+module(Q) when ?is_amqqueue(Q) ->
+ case policy(<<"queue-master-locator">>, Q) of
+ undefined -> no_location_strategy;
+ Mode -> module(Mode)
+ end;
+module(Strategy) when is_binary(Strategy) ->
+ case rabbit_registry:binary_to_type(Strategy) of
+ {error, not_found} -> no_location_strategy;
+ T ->
+ case rabbit_registry:lookup_module(queue_master_locator, T) of
+ {ok, Module} ->
+ case code:which(Module) of
+ non_existing -> no_location_strategy;
+ _ -> {ok, Module}
+ end;
+ _ ->
+ no_location_strategy
+ end
+ end;
+module(Strategy) ->
+ module(rabbit_data_coercion:to_binary(Strategy)).
diff --git a/deps/rabbit/src/rabbit_queue_master_location_misc.erl b/deps/rabbit/src/rabbit_queue_master_location_misc.erl
new file mode 100644
index 0000000000..37698e184f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_master_location_misc.erl
@@ -0,0 +1,108 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_master_location_misc).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([lookup_master/2,
+ lookup_queue/2,
+ get_location/1,
+ get_location_mod_by_config/1,
+ get_location_mod_by_args/1,
+ get_location_mod_by_policy/1,
+ all_nodes/1]).
+
+-spec lookup_master(binary(), binary()) -> {ok, node()} | {error, not_found}.
+lookup_master(QueueNameBin, VHostPath) when is_binary(QueueNameBin),
+ is_binary(VHostPath) ->
+ QueueR = rabbit_misc:r(VHostPath, queue, QueueNameBin),
+ case rabbit_amqqueue:lookup(QueueR) of
+ {ok, Queue} when ?amqqueue_has_valid_pid(Queue) ->
+ Pid = amqqueue:get_pid(Queue),
+ {ok, node(Pid)};
+ Error -> Error
+ end.
+
+lookup_queue(QueueNameBin, VHostPath) when is_binary(QueueNameBin),
+ is_binary(VHostPath) ->
+ QueueR = rabbit_misc:r(VHostPath, queue, QueueNameBin),
+ case rabbit_amqqueue:lookup(QueueR) of
+ Reply = {ok, Queue} when ?is_amqqueue(Queue) ->
+ Reply;
+ Error ->
+ Error
+ end.
+
+get_location(Queue) when ?is_amqqueue(Queue) ->
+ Reply1 = case get_location_mod_by_args(Queue) of
+ _Err1 = {error, _} ->
+ case get_location_mod_by_policy(Queue) of
+ _Err2 = {error, _} ->
+ case get_location_mod_by_config(Queue) of
+ Err3 = {error, _} -> Err3;
+ Reply0 = {ok, _Module} -> Reply0
+ end;
+ Reply0 = {ok, _Module} -> Reply0
+ end;
+ Reply0 = {ok, _Module} -> Reply0
+ end,
+
+ case Reply1 of
+ {ok, CB} -> CB:queue_master_location(Queue);
+ Error -> Error
+ end.
+
+get_location_mod_by_args(Queue) when ?is_amqqueue(Queue) ->
+ Args = amqqueue:get_arguments(Queue),
+ case rabbit_misc:table_lookup(Args, <<"x-queue-master-locator">>) of
+ {_Type, Strategy} ->
+ case rabbit_queue_location_validator:validate_strategy(Strategy) of
+ Reply = {ok, _CB} -> Reply;
+ Error -> Error
+ end;
+ _ -> {error, "x-queue-master-locator undefined"}
+ end.
+
+get_location_mod_by_policy(Queue) when ?is_amqqueue(Queue) ->
+ case rabbit_policy:get(<<"queue-master-locator">> , Queue) of
+ undefined -> {error, "queue-master-locator policy undefined"};
+ Strategy ->
+ case rabbit_queue_location_validator:validate_strategy(Strategy) of
+ Reply = {ok, _CB} -> Reply;
+ Error -> Error
+ end
+ end.
+
+get_location_mod_by_config(Queue) when ?is_amqqueue(Queue) ->
+ case application:get_env(rabbit, queue_master_locator) of
+ {ok, Strategy} ->
+ case rabbit_queue_location_validator:validate_strategy(Strategy) of
+ Reply = {ok, _CB} -> Reply;
+ Error -> Error
+ end;
+ _ -> {error, "queue_master_locator undefined"}
+ end.
+
+all_nodes(Queue) when ?is_amqqueue(Queue) ->
+ handle_is_mirrored_ha_nodes(rabbit_mirror_queue_misc:is_mirrored_ha_nodes(Queue), Queue).
+
+handle_is_mirrored_ha_nodes(false, _Queue) ->
+    % Note: ha-mode is NOT 'nodes' - it is either 'exactly' or 'all', which means
+    % that any node in the cluster is eligible to be the new queue master node
+ rabbit_nodes:all_running();
+handle_is_mirrored_ha_nodes(true, Queue) ->
+ % Note: ha-mode is 'nodes', which explicitly specifies allowed nodes.
+ % We must use suggested_queue_nodes to get that list of nodes as the
+ % starting point for finding the queue master location
+ handle_suggested_queue_nodes(rabbit_mirror_queue_misc:suggested_queue_nodes(Queue)).
+
+handle_suggested_queue_nodes({_MNode, []}) ->
+ rabbit_nodes:all_running();
+handle_suggested_queue_nodes({MNode, SNodes}) ->
+ [MNode | SNodes].
diff --git a/deps/rabbit/src/rabbit_queue_master_locator.erl b/deps/rabbit/src/rabbit_queue_master_locator.erl
new file mode 100644
index 0000000000..ff2e30f587
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_master_locator.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_master_locator).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-callback description() -> [proplists:property()].
+-callback queue_master_location(amqqueue:amqqueue()) ->
+ {'ok', node()} | {'error', term()}.
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit/src/rabbit_queue_type.erl b/deps/rabbit/src/rabbit_queue_type.erl
new file mode 100644
index 0000000000..4e59b6a7c0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_type.erl
@@ -0,0 +1,581 @@
+-module(rabbit_queue_type).
+-include("amqqueue.hrl").
+-include_lib("rabbit_common/include/resource.hrl").
+
+-export([
+ init/0,
+ close/1,
+ discover/1,
+ default/0,
+ is_enabled/1,
+ declare/2,
+ delete/4,
+ is_recoverable/1,
+ recover/2,
+ purge/1,
+ policy_changed/1,
+ stat/1,
+ remove/2,
+ info/2,
+ state_info/1,
+ info_down/2,
+ info_down/3,
+ %% stateful client API
+ new/2,
+ consume/3,
+ cancel/5,
+ handle_down/3,
+ handle_event/3,
+ module/2,
+ deliver/3,
+ settle/5,
+ credit/5,
+ dequeue/5,
+ fold_state/3,
+ is_policy_applicable/2,
+ is_server_named_allowed/1
+ ]).
+
+%% TODO: what is a good identity for a classic queue including all replicas?
+-type queue_name() :: rabbit_types:r(queue).
+-type queue_ref() :: queue_name() | atom().
+-type queue_state() :: term().
+-type msg_tag() :: term().
+
+-define(STATE, ?MODULE).
+
+%% Recoverable slaves shouldn't really be a generic key, but let's keep it here until
+%% mirrored queues are deprecated.
+-define(DOWN_KEYS, [name, durable, auto_delete, arguments, pid, recoverable_slaves, type, state]).
+
+-define(QREF(QueueReference),
+ (is_tuple(QueueReference) andalso element(1, QueueReference) == resource)
+ orelse is_atom(QueueReference)).
+%% anything that the host process needs to do on behalf of the queue type
+%% session, like knowing when to notify on monitor down
+-type action() ::
+ {monitor, Pid :: pid(), queue_ref()} |
+ %% indicate to the queue type module that a message has been delivered
+ %% fully to the queue
+ {settled, Success :: boolean(), [msg_tag()]} |
+ {deliver, rabbit_types:ctag(), boolean(), [rabbit_amqqueue:qmsg()]}.
+
+-type actions() :: [action()].
+
+-type event() ::
+ {down, pid(), Info :: term()} |
+ term().
+
+-record(ctx, {module :: module(),
+ name :: queue_name(),
+ %% "publisher confirm queue accounting"
+ %% queue type implementation should emit a:
+ %% {settle, Success :: boolean(), msg_tag()}
+ %% to either settle or reject the delivery of a
+ %% message to the queue instance
+              %% The queue type module will then emit a {confirm | reject, [msg_tag()]}
+              %% action to the channel or channel-like process when a msg_tag
+ %% has reached its conclusion
+ state :: queue_state()}).
+
+
+-record(?STATE, {ctxs = #{} :: #{queue_ref() => #ctx{} | queue_ref()},
+ monitor_registry = #{} :: #{pid() => queue_ref()}
+ }).
+
+-opaque state() :: #?STATE{}.
+
+-type consume_spec() :: #{no_ack := boolean(),
+ channel_pid := pid(),
+ limiter_pid => pid(),
+ limiter_active => boolean(),
+ prefetch_count => non_neg_integer(),
+ consumer_tag := rabbit_types:ctag(),
+ exclusive_consume => boolean(),
+ args => rabbit_framing:amqp_table(),
+ ok_msg := term(),
+ acting_user := rabbit_types:username()}.
+
+
+
+% copied from rabbit_amqqueue
+-type absent_reason() :: 'nodedown' | 'crashed' | stopped | timeout.
+
+-type settle_op() :: 'complete' | 'requeue' | 'discard'.
+
+-export_type([state/0,
+ consume_spec/0,
+ action/0,
+ actions/0,
+ settle_op/0]).
+
+%% is the queue type feature enabled
+-callback is_enabled() -> boolean().
+
+-callback declare(amqqueue:amqqueue(), node()) ->
+ {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} |
+ {'absent', amqqueue:amqqueue(), absent_reason()} |
+ {'protocol_error', Type :: atom(), Reason :: string(), Args :: term()}.
+
+-callback delete(amqqueue:amqqueue(),
+ boolean(),
+ boolean(),
+ rabbit_types:username()) ->
+ rabbit_types:ok(non_neg_integer()) |
+ rabbit_types:error(in_use | not_empty) |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+
+-callback recover(rabbit_types:vhost(), [amqqueue:amqqueue()]) ->
+ {Recovered :: [amqqueue:amqqueue()],
+ Failed :: [amqqueue:amqqueue()]}.
+
+%% checks if the queue should be recovered
+-callback is_recoverable(amqqueue:amqqueue()) ->
+ boolean().
+
+-callback purge(amqqueue:amqqueue()) ->
+ {ok, non_neg_integer()} | {error, term()}.
+
+-callback policy_changed(amqqueue:amqqueue()) -> ok.
+
+%% stateful
+%% initialise and return a queue type specific session context
+-callback init(amqqueue:amqqueue()) -> queue_state().
+
+-callback close(queue_state()) -> ok.
+%% update the queue type state from the amqqueue record
+-callback update(amqqueue:amqqueue(), queue_state()) -> queue_state().
+
+-callback consume(amqqueue:amqqueue(),
+ consume_spec(),
+ queue_state()) ->
+ {ok, queue_state(), actions()} | {error, term()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+
+-callback cancel(amqqueue:amqqueue(),
+ rabbit_types:ctag(),
+ term(),
+ rabbit_types:username(),
+ queue_state()) ->
+ {ok, queue_state()} | {error, term()}.
+
+%% any async events returned from the queue system should be processed through
+%% this
+-callback handle_event(Event :: event(),
+ queue_state()) ->
+ {ok, queue_state(), actions()} | {error, term()} | eol |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+
+-callback deliver([{amqqueue:amqqueue(), queue_state()}],
+ Delivery :: term()) ->
+ {[{amqqueue:amqqueue(), queue_state()}], actions()}.
+
+-callback settle(settle_op(), rabbit_types:ctag(), [non_neg_integer()], queue_state()) ->
+ {queue_state(), actions()} |
+ {'protocol_error', Type :: atom(), Reason :: string(), Args :: term()}.
+
+-callback credit(rabbit_types:ctag(),
+ non_neg_integer(), Drain :: boolean(), queue_state()) ->
+ {queue_state(), actions()}.
+
+-callback dequeue(NoAck :: boolean(), LimiterPid :: pid(),
+ rabbit_types:ctag(), queue_state()) ->
+ {ok, Count :: non_neg_integer(), rabbit_amqqueue:qmsg(), queue_state()} |
+ {empty, queue_state()} |
+ {error, term()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+
+%% return a map of state summary information
+-callback state_info(queue_state()) ->
+ #{atom() := term()}.
+
+%% general queue info
+-callback info(amqqueue:amqqueue(), all_keys | rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+
+-callback stat(amqqueue:amqqueue()) ->
+ {'ok', non_neg_integer(), non_neg_integer()}.
+
+-callback capabilities() ->
+ #{atom() := term()}.
+
+%% TODO: this should be controlled by a registry that is populated on boot
+discover(<<"quorum">>) ->
+ rabbit_quorum_queue;
+discover(<<"classic">>) ->
+ rabbit_classic_queue;
+discover(<<"stream">>) ->
+ rabbit_stream_queue.
+
+default() ->
+ rabbit_classic_queue.
+
+-spec is_enabled(module()) -> boolean().
+is_enabled(Type) ->
+ Type:is_enabled().
+
+-spec declare(amqqueue:amqqueue(), node()) ->
+ {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} |
+ {'absent', amqqueue:amqqueue(), absent_reason()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+declare(Q, Node) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:declare(Q, Node).
+
+-spec delete(amqqueue:amqqueue(), boolean(),
+ boolean(), rabbit_types:username()) ->
+ rabbit_types:ok(non_neg_integer()) |
+ rabbit_types:error(in_use | not_empty) |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+delete(Q, IfUnused, IfEmpty, ActingUser) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:delete(Q, IfUnused, IfEmpty, ActingUser).
+
+-spec purge(amqqueue:amqqueue()) ->
+ {'ok', non_neg_integer()} | {error, term()}.
+purge(Q) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:purge(Q).
+
+-spec policy_changed(amqqueue:amqqueue()) -> 'ok'.
+policy_changed(Q) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:policy_changed(Q).
+
+-spec stat(amqqueue:amqqueue()) ->
+ {'ok', non_neg_integer(), non_neg_integer()}.
+stat(Q) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:stat(Q).
+
+-spec remove(queue_ref(), state()) -> state().
+remove(QRef, #?STATE{ctxs = Ctxs0} = State) ->
+ case maps:take(QRef, Ctxs0) of
+ error ->
+ State;
+ {_, Ctxs} ->
+ State#?STATE{ctxs = Ctxs}
+ end.
+
+-spec info(amqqueue:amqqueue(), all_keys | rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+info(Q, Items) when ?amqqueue_state_is(Q, crashed) ->
+ info_down(Q, Items, crashed);
+info(Q, Items) when ?amqqueue_state_is(Q, stopped) ->
+ info_down(Q, Items, stopped);
+info(Q, Items) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:info(Q, Items).
+
+fold_state(Fun, Acc, #?STATE{ctxs = Ctxs}) ->
+ maps:fold(Fun, Acc, Ctxs).
+
+state_info(#ctx{state = S,
+ module = Mod}) ->
+ Mod:state_info(S);
+state_info(_) ->
+ #{}.
+
+down_keys() -> ?DOWN_KEYS.
+
+info_down(Q, DownReason) ->
+ info_down(Q, down_keys(), DownReason).
+
+info_down(Q, all_keys, DownReason) ->
+ info_down(Q, down_keys(), DownReason);
+info_down(Q, Items, DownReason) ->
+ [{Item, i_down(Item, Q, DownReason)} || Item <- Items].
+
+i_down(name, Q, _) -> amqqueue:get_name(Q);
+i_down(durable, Q, _) -> amqqueue:is_durable(Q);
+i_down(auto_delete, Q, _) -> amqqueue:is_auto_delete(Q);
+i_down(arguments, Q, _) -> amqqueue:get_arguments(Q);
+i_down(pid, Q, _) -> amqqueue:get_pid(Q);
+i_down(recoverable_slaves, Q, _) -> amqqueue:get_recoverable_slaves(Q);
+i_down(type, Q, _) -> amqqueue:get_type(Q);
+i_down(state, _Q, DownReason) -> DownReason;
+i_down(_K, _Q, _DownReason) -> ''.
+
+is_policy_applicable(Q, Policy) ->
+ Mod = amqqueue:get_type(Q),
+ Capabilities = Mod:capabilities(),
+ Applicable = maps:get(policies, Capabilities, []),
+ lists:all(fun({P, _}) ->
+ lists:member(P, Applicable)
+ end, Policy).
+
+is_server_named_allowed(Type) ->
+ Capabilities = Type:capabilities(),
+ maps:get(server_named, Capabilities, false).
+
+-spec init() -> state().
+init() ->
+ #?STATE{}.
+
+-spec close(state()) -> ok.
+close(#?STATE{ctxs = Contexts}) ->
+ _ = maps:map(
+ fun (_, #ctx{module = Mod,
+ state = S}) ->
+ ok = Mod:close(S)
+ end, Contexts),
+ ok.
+
+-spec new(amqqueue:amqqueue(), state()) -> state().
+new(Q, State) when ?is_amqqueue(Q) ->
+ Ctx = get_ctx(Q, State),
+ set_ctx(Q, Ctx, State).
+
+-spec consume(amqqueue:amqqueue(), consume_spec(), state()) ->
+ {ok, state(), actions()} | {error, term()}.
+consume(Q, Spec, State) ->
+ #ctx{state = CtxState0} = Ctx = get_ctx(Q, State),
+ Mod = amqqueue:get_type(Q),
+ case Mod:consume(Q, Spec, CtxState0) of
+ {ok, CtxState, Actions} ->
+ return_ok(set_ctx(Q, Ctx#ctx{state = CtxState}, State), Actions);
+ Err ->
+ Err
+ end.
+
+%% TODO switch to cancel spec api
+-spec cancel(amqqueue:amqqueue(),
+ rabbit_types:ctag(),
+ term(),
+ rabbit_types:username(),
+ state()) ->
+ {ok, state()} | {error, term()}.
+cancel(Q, Tag, OkMsg, ActiveUser, Ctxs) ->
+ #ctx{state = State0} = Ctx = get_ctx(Q, Ctxs),
+ Mod = amqqueue:get_type(Q),
+ case Mod:cancel(Q, Tag, OkMsg, ActiveUser, State0) of
+ {ok, State} ->
+ {ok, set_ctx(Q, Ctx#ctx{state = State}, Ctxs)};
+ Err ->
+ Err
+ end.
+
+-spec is_recoverable(amqqueue:amqqueue()) ->
+ boolean().
+is_recoverable(Q) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:is_recoverable(Q).
+
+-spec recover(rabbit_types:vhost(), [amqqueue:amqqueue()]) ->
+ {Recovered :: [amqqueue:amqqueue()],
+ Failed :: [amqqueue:amqqueue()]}.
+recover(VHost, Qs) ->
+ ByType = lists:foldl(
+ fun (Q, Acc) ->
+ T = amqqueue:get_type(Q),
+ maps:update_with(T, fun (X) ->
+ [Q | X]
+ end, Acc)
+ %% TODO resolve all registered queue types from registry
+ end, #{rabbit_classic_queue => [],
+ rabbit_quorum_queue => [],
+ rabbit_stream_queue => []}, Qs),
+ maps:fold(fun (Mod, Queues, {R0, F0}) ->
+ {R, F} = Mod:recover(VHost, Queues),
+ {R0 ++ R, F0 ++ F}
+ end, {[], []}, ByType).
+
+-spec handle_down(pid(), term(), state()) ->
+ {ok, state(), actions()} | {eol, queue_ref()} | {error, term()}.
+handle_down(Pid, Info, #?STATE{monitor_registry = Reg0} = State0) ->
+ %% lookup queue ref in monitor registry
+ case maps:take(Pid, Reg0) of
+ {QRef, Reg} ->
+ case handle_event(QRef, {down, Pid, Info}, State0) of
+ {ok, State, Actions} ->
+ {ok, State#?STATE{monitor_registry = Reg}, Actions};
+ eol ->
+ {eol, QRef};
+ Err ->
+ Err
+ end;
+ error ->
+ {ok, State0, []}
+ end.
+
+%% messages sent from queues
+-spec handle_event(queue_ref(), term(), state()) ->
+ {ok, state(), actions()} | eol | {error, term()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+handle_event(QRef, Evt, Ctxs) ->
+ %% events can arrive after a queue state has been cleared up
+ %% so need to be defensive here
+ case get_ctx(QRef, Ctxs, undefined) of
+ #ctx{module = Mod,
+ state = State0} = Ctx ->
+ case Mod:handle_event(Evt, State0) of
+ {ok, State, Actions} ->
+ return_ok(set_ctx(QRef, Ctx#ctx{state = State}, Ctxs), Actions);
+ Err ->
+ Err
+ end;
+ undefined ->
+ {ok, Ctxs, []}
+ end.
+
+-spec module(queue_ref(), state()) ->
+ {ok, module()} | {error, not_found}.
+module(QRef, Ctxs) ->
+ %% events can arrive after a queue state has been cleared up
+ %% so need to be defensive here
+ case get_ctx(QRef, Ctxs, undefined) of
+ #ctx{module = Mod} ->
+ {ok, Mod};
+ undefined ->
+ {error, not_found}
+ end.
+
+-spec deliver([amqqueue:amqqueue()], Delivery :: term(),
+ stateless | state()) ->
+ {ok, state(), actions()}.
+deliver(Qs, Delivery, stateless) ->
+ _ = lists:map(fun(Q) ->
+ Mod = amqqueue:get_type(Q),
+ _ = Mod:deliver([{Q, stateless}], Delivery)
+ end, Qs),
+ {ok, stateless, []};
+deliver(Qs, Delivery, #?STATE{} = State0) ->
+ %% sort by queue type - then dispatch each group
+ ByType = lists:foldl(
+ fun (Q, Acc) ->
+ T = amqqueue:get_type(Q),
+ Ctx = get_ctx(Q, State0),
+ maps:update_with(
+ T, fun (A) ->
+ [{Q, Ctx#ctx.state} | A]
+ end, [{Q, Ctx#ctx.state}], Acc)
+ end, #{}, Qs),
+ %%% dispatch each group to queue type interface?
+ {Xs, Actions} = maps:fold(fun(Mod, QSs, {X0, A0}) ->
+ {X, A} = Mod:deliver(QSs, Delivery),
+ {X0 ++ X, A0 ++ A}
+ end, {[], []}, ByType),
+ State = lists:foldl(
+ fun({Q, S}, Acc) ->
+ Ctx = get_ctx(Q, Acc),
+ set_ctx(qref(Q), Ctx#ctx{state = S}, Acc)
+ end, State0, Xs),
+ return_ok(State, Actions).
+
+
+-spec settle(queue_ref(), settle_op(), rabbit_types:ctag(),
+ [non_neg_integer()], state()) ->
+ {ok, state(), actions()} |
+ {'protocol_error', Type :: atom(), Reason :: string(), Args :: term()}.
+settle(QRef, Op, CTag, MsgIds, Ctxs)
+ when ?QREF(QRef) ->
+ case get_ctx(QRef, Ctxs, undefined) of
+ undefined ->
+ %% if we receive a settlement and there is no queue state it means
+ %% the queue was deleted with active consumers
+ {ok, Ctxs, []};
+ #ctx{state = State0,
+ module = Mod} = Ctx ->
+ case Mod:settle(Op, CTag, MsgIds, State0) of
+ {State, Actions} ->
+ {ok, set_ctx(QRef, Ctx#ctx{state = State}, Ctxs), Actions};
+ Err ->
+ Err
+ end
+ end.
+
+-spec credit(amqqueue:amqqueue() | queue_ref(),
+ rabbit_types:ctag(), non_neg_integer(),
+ boolean(), state()) -> {ok, state(), actions()}.
+credit(Q, CTag, Credit, Drain, Ctxs) ->
+ #ctx{state = State0,
+ module = Mod} = Ctx = get_ctx(Q, Ctxs),
+ {State, Actions} = Mod:credit(CTag, Credit, Drain, State0),
+ {ok, set_ctx(Q, Ctx#ctx{state = State}, Ctxs), Actions}.
+
+-spec dequeue(amqqueue:amqqueue(), boolean(),
+ pid(), rabbit_types:ctag(), state()) ->
+ {ok, non_neg_integer(), term(), state()} |
+ {empty, state()}.
+dequeue(Q, NoAck, LimiterPid, CTag, Ctxs) ->
+ #ctx{state = State0} = Ctx = get_ctx(Q, Ctxs),
+ Mod = amqqueue:get_type(Q),
+ case Mod:dequeue(NoAck, LimiterPid, CTag, State0) of
+ {ok, Num, Msg, State} ->
+ {ok, Num, Msg, set_ctx(Q, Ctx#ctx{state = State}, Ctxs)};
+ {empty, State} ->
+ {empty, set_ctx(Q, Ctx#ctx{state = State}, Ctxs)};
+ {error, _} = Err ->
+ Err;
+ {protocol_error, _, _, _} = Err ->
+ Err
+ end.
+
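+%% Look up the per-queue context for an amqqueue record, refreshing its state
+%% via Mod:update/2, or lazily initialise a new context if none exists yet.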
+get_ctx(Q, #?STATE{ctxs = Contexts}) when ?is_amqqueue(Q) ->
+ Ref = qref(Q),
+ case Contexts of
+ #{Ref := #ctx{module = Mod,
+ state = State} = Ctx} ->
+ Ctx#ctx{state = Mod:update(Q, State)};
+ _ ->
+ %% not found - initialize
+ Mod = amqqueue:get_type(Q),
+ Name = amqqueue:get_name(Q),
+ #ctx{module = Mod,
+ name = Name,
+ state = Mod:init(Q)}
+ end;
+get_ctx(QRef, Contexts) when ?QREF(QRef) ->
+ case get_ctx(QRef, Contexts, undefined) of
+ undefined ->
+ exit({queue_context_not_found, QRef});
+ Ctx ->
+ Ctx
+ end.
+
+get_ctx(QRef, #?STATE{ctxs = Contexts}, Default) ->
+ Ref = qref(QRef),
+ %% if we use a QRef it should always be initialised
+ case maps:get(Ref, Contexts, undefined) of
+ #ctx{} = Ctx ->
+ Ctx;
+ undefined ->
+ Default
+ end.
+
+set_ctx(Q, Ctx, #?STATE{ctxs = Contexts} = State)
+ when ?is_amqqueue(Q) ->
+ Ref = qref(Q),
+ State#?STATE{ctxs = maps:put(Ref, Ctx, Contexts)};
+set_ctx(QRef, Ctx, #?STATE{ctxs = Contexts} = State) ->
+ Ref = qref(QRef),
+ State#?STATE{ctxs = maps:put(Ref, Ctx, Contexts)}.
+
+qref(#resource{kind = queue} = QName) ->
+ QName;
+qref(Q) when ?is_amqqueue(Q) ->
+ amqqueue:get_name(Q).
+
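+%% Post-process the actions emitted by a queue type implementation: 'monitor'
+%% actions register the pid in the monitor registry (monitoring it at most
+%% once), all other actions are returned to the caller in their original order.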
+return_ok(State0, []) ->
+ {ok, State0, []};
+return_ok(State0, Actions0) ->
+ {State, Actions} =
+ lists:foldl(
+ fun({monitor, Pid, QRef},
+ {#?STATE{monitor_registry = M0} = S0, A0}) ->
+ case M0 of
+ #{Pid := QRef} ->
+ %% already monitored by the qref
+ {S0, A0};
+ #{Pid := _} ->
+ %% TODO: allow multiple Qrefs to monitor the same pid
+ exit(return_ok_duplicate_monitored_pid);
+ _ ->
+ _ = erlang:monitor(process, Pid),
+ M = M0#{Pid => QRef},
+ {S0#?STATE{monitor_registry = M}, A0}
+ end;
+ (Act, {S, A0}) ->
+ {S, [Act | A0]}
+ end, {State0, []}, Actions0),
+ {ok, State, lists:reverse(Actions)}.
diff --git a/deps/rabbit/src/rabbit_queue_type_util.erl b/deps/rabbit/src/rabbit_queue_type_util.erl
new file mode 100644
index 0000000000..e417cb13c4
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_type_util.erl
@@ -0,0 +1,74 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2018-2020 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(rabbit_queue_type_util).
+
+-export([args_policy_lookup/3,
+ qname_to_internal_name/1,
+ check_auto_delete/1,
+ check_exclusive/1,
+ check_non_durable/1,
+ run_checks/2]).
+
+-include("rabbit.hrl").
+-include("amqqueue.hrl").
+
+args_policy_lookup(Name, Resolve, Q) when ?is_amqqueue(Q) ->
+ Args = amqqueue:get_arguments(Q),
+ AName = <<"x-", Name/binary>>,
+ case {rabbit_policy:get(Name, Q), rabbit_misc:table_lookup(Args, AName)} of
+ {undefined, undefined} -> undefined;
+ {undefined, {_Type, Val}} -> Val;
+ {Val, undefined} -> Val;
+ {PolVal, {_Type, ArgVal}} -> Resolve(PolVal, ArgVal)
+ end.
+
+%% TODO escape hack
+qname_to_internal_name(#resource{virtual_host = <<"/">>, name = Name}) ->
+ erlang:binary_to_atom(<<"%2F_", Name/binary>>, utf8);
+qname_to_internal_name(#resource{virtual_host = VHost, name = Name}) ->
+ erlang:binary_to_atom(<<VHost/binary, "_", Name/binary>>, utf8).
+
+check_auto_delete(Q) when ?amqqueue_is_auto_delete(Q) ->
+ Name = amqqueue:get_name(Q),
+ {protocol_error, precondition_failed, "invalid property 'auto-delete' for ~s",
+ [rabbit_misc:rs(Name)]};
+check_auto_delete(_) ->
+ ok.
+
+check_exclusive(Q) when ?amqqueue_exclusive_owner_is(Q, none) ->
+ ok;
+check_exclusive(Q) when ?is_amqqueue(Q) ->
+ Name = amqqueue:get_name(Q),
+ {protocol_error, precondition_failed, "invalid property 'exclusive-owner' for ~s",
+ [rabbit_misc:rs(Name)]}.
+
+check_non_durable(Q) when ?amqqueue_is_durable(Q) ->
+ ok;
+check_non_durable(Q) when not ?amqqueue_is_durable(Q) ->
+ Name = amqqueue:get_name(Q),
+ {protocol_error, precondition_failed, "invalid property 'non-durable' for ~s",
+ [rabbit_misc:rs(Name)]}.
+
+run_checks([], _) ->
+ ok;
+run_checks([C | Checks], Q) ->
+ case C(Q) of
+ ok ->
+ run_checks(Checks, Q);
+ Err ->
+ Err
+ end.
diff --git a/deps/rabbit/src/rabbit_quorum_memory_manager.erl b/deps/rabbit/src/rabbit_quorum_memory_manager.erl
new file mode 100644
index 0000000000..94c2ef6b4b
--- /dev/null
+++ b/deps/rabbit/src/rabbit_quorum_memory_manager.erl
@@ -0,0 +1,67 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_quorum_memory_manager).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+-export([register/0, unregister/0]).
+
+-record(state, {last_roll_over,
+ interval}).
+
+-rabbit_boot_step({rabbit_quorum_memory_manager,
+ [{description, "quorum memory manager"},
+ {mfa, {?MODULE, register, []}},
+ {cleanup, {?MODULE, unregister, []}},
+ {requires, rabbit_event},
+ {enables, recovery}]}).
+
+register() ->
+ gen_event:add_handler(rabbit_alarm, ?MODULE, []).
+
+unregister() ->
+ gen_event:delete_handler(rabbit_alarm, ?MODULE, []).
+
+init([]) ->
+ {ok, #state{interval = interval()}}.
+
+handle_call(_, State) ->
+ {ok, ok, State}.
+
+handle_event({set_alarm, {{resource_limit, memory, Node}, []}},
+ #state{last_roll_over = undefined} = State) when Node == node() ->
+ {ok, force_roll_over(State)};
+handle_event({set_alarm, {{resource_limit, memory, Node}, []}},
+ #state{last_roll_over = Last, interval = Interval } = State)
+ when Node == node() ->
+ Now = erlang:system_time(millisecond),
+ case Now > (Last + Interval) of
+ true ->
+ {ok, force_roll_over(State)};
+ false ->
+ {ok, State}
+ end;
+handle_event(_, State) ->
+ {ok, State}.
+
+handle_info(_, State) ->
+ {ok, State}.
+
+terminate(_, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+force_roll_over(State) ->
+ ra_log_wal:force_roll_over(ra_log_wal),
+ State#state{last_roll_over = erlang:system_time(millisecond)}.
+
+interval() ->
+ application:get_env(rabbit, min_wal_roll_over_interval, 20000).
diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl
new file mode 100644
index 0000000000..95cc93d728
--- /dev/null
+++ b/deps/rabbit/src/rabbit_quorum_queue.erl
@@ -0,0 +1,1523 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_quorum_queue).
+
+-behaviour(rabbit_queue_type).
+
+-export([init/1,
+ close/1,
+ update/2,
+ handle_event/2]).
+-export([is_recoverable/1, recover/2, stop/1, delete/4, delete_immediately/2]).
+-export([state_info/1, info/2, stat/1, infos/1]).
+-export([settle/4, dequeue/4, consume/3, cancel/5]).
+-export([credit/4]).
+-export([purge/1]).
+-export([stateless_deliver/2, deliver/3, deliver/2]).
+-export([dead_letter_publish/4]).
+-export([queue_name/1]).
+-export([cluster_state/1, status/2]).
+-export([update_consumer_handler/8, update_consumer/9]).
+-export([cancel_consumer_handler/2, cancel_consumer/3]).
+-export([become_leader/2, handle_tick/3, spawn_deleter/1]).
+-export([rpc_delete_metrics/1]).
+-export([format/1]).
+-export([open_files/1]).
+-export([peek/2, peek/3]).
+-export([add_member/4]).
+-export([delete_member/3]).
+-export([requeue/3]).
+-export([policy_changed/1]).
+-export([format_ra_event/3]).
+-export([cleanup_data_dir/0]).
+-export([shrink_all/1,
+ grow/4]).
+-export([transfer_leadership/2, get_replicas/1, queue_length/1]).
+-export([file_handle_leader_reservation/1, file_handle_other_reservation/0]).
+-export([file_handle_release_reservation/0]).
+-export([list_with_minimum_quorum/0, list_with_minimum_quorum_for_cli/0,
+ filter_quorum_critical/1, filter_quorum_critical/2,
+ all_replica_states/0]).
+-export([capabilities/0]).
+-export([repair_amqqueue_nodes/1,
+ repair_amqqueue_nodes/2
+ ]).
+-export([reclaim_memory/2]).
+
+-export([is_enabled/0,
+ declare/2]).
+
+-import(rabbit_queue_type_util, [args_policy_lookup/3,
+ qname_to_internal_name/1]).
+
+-include_lib("stdlib/include/qlc.hrl").
+-include("rabbit.hrl").
+-include("amqqueue.hrl").
+
+-type msg_id() :: non_neg_integer().
+-type qmsg() :: {rabbit_types:r('queue'), pid(), msg_id(), boolean(), rabbit_types:message()}.
+
+-define(STATISTICS_KEYS,
+ [policy,
+ operator_policy,
+ effective_policy_definition,
+ consumers,
+ memory,
+ state,
+ garbage_collection,
+ leader,
+ online,
+ members,
+ open_files,
+ single_active_consumer_pid,
+ single_active_consumer_ctag,
+ messages_ram,
+ message_bytes_ram
+ ]).
+
+-define(INFO_KEYS, [name, durable, auto_delete, arguments, pid, messages, messages_ready,
+ messages_unacknowledged, local_state, type] ++ ?STATISTICS_KEYS).
+
+-define(RPC_TIMEOUT, 1000).
+-define(TICK_TIMEOUT, 5000). %% the ra server tick time
+-define(DELETE_TIMEOUT, 5000).
+-define(ADD_MEMBER_TIMEOUT, 5000).
+
+%%----------- rabbit_queue_type ---------------------------------------------
+
+-spec is_enabled() -> boolean().
+is_enabled() ->
+ rabbit_feature_flags:is_enabled(quorum_queue).
+
+%%----------------------------------------------------------------------------
+
+-spec init(amqqueue:amqqueue()) -> rabbit_fifo_client:state().
+init(Q) when ?is_amqqueue(Q) ->
+ {ok, SoftLimit} = application:get_env(rabbit, quorum_commands_soft_limit),
+ %% This lookup could potentially return an {error, not_found}, but we do not
+ %% know what to do if the queue has `disappeared`. Let it crash.
+ {Name, _LeaderNode} = Leader = amqqueue:get_pid(Q),
+ Nodes = get_nodes(Q),
+ QName = amqqueue:get_name(Q),
+ %% Ensure the leader is listed first
+ Servers0 = [{Name, N} || N <- Nodes],
+ Servers = [Leader | lists:delete(Leader, Servers0)],
+ rabbit_fifo_client:init(QName, Servers, SoftLimit,
+ fun() -> credit_flow:block(Name) end,
+ fun() -> credit_flow:unblock(Name), ok end).
+
+-spec close(rabbit_fifo_client:state()) -> ok.
+close(_State) ->
+ ok.
+
+-spec update(amqqueue:amqqueue(), rabbit_fifo_client:state()) ->
+ rabbit_fifo_client:state().
+update(Q, State) when ?amqqueue_is_quorum(Q) ->
+ %% QQ state maintains it's own updates
+ State.
+
+-spec handle_event({amqqueue:ra_server_id(), any()},
+ rabbit_fifo_client:state()) ->
+ {ok, rabbit_fifo_client:state(), rabbit_queue_type:actions()} |
+ eol |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+handle_event({From, Evt}, QState) ->
+ rabbit_fifo_client:handle_ra_event(From, Evt, QState).
+
+-spec declare(amqqueue:amqqueue(), node()) ->
+ {new | existing, amqqueue:amqqueue()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+declare(Q, _Node) when ?amqqueue_is_quorum(Q) ->
+ case rabbit_queue_type_util:run_checks(
+ [fun rabbit_queue_type_util:check_auto_delete/1,
+ fun rabbit_queue_type_util:check_exclusive/1,
+ fun rabbit_queue_type_util:check_non_durable/1],
+ Q) of
+ ok ->
+ start_cluster(Q);
+ Err ->
+ Err
+ end.
+
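+%% Declare the amqqueue record, then start a Ra cluster for it on the selected
+%% quorum of nodes; if the Ra cluster fails to start, the internal queue
+%% record is deleted again and a protocol error is returned.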
+start_cluster(Q) ->
+ QName = amqqueue:get_name(Q),
+ Durable = amqqueue:is_durable(Q),
+ AutoDelete = amqqueue:is_auto_delete(Q),
+ Arguments = amqqueue:get_arguments(Q),
+ Opts = amqqueue:get_options(Q),
+ ActingUser = maps:get(user, Opts, ?UNKNOWN_USER),
+ QuorumSize = get_default_quorum_initial_group_size(Arguments),
+ RaName = qname_to_internal_name(QName),
+ Id = {RaName, node()},
+ Nodes = select_quorum_nodes(QuorumSize, rabbit_mnesia:cluster_nodes(all)),
+ NewQ0 = amqqueue:set_pid(Q, Id),
+ NewQ1 = amqqueue:set_type_state(NewQ0, #{nodes => Nodes}),
+ case rabbit_amqqueue:internal_declare(NewQ1, false) of
+ {created, NewQ} ->
+ TickTimeout = application:get_env(rabbit, quorum_tick_interval, ?TICK_TIMEOUT),
+ RaConfs = [make_ra_conf(NewQ, ServerId, TickTimeout)
+ || ServerId <- members(NewQ)],
+ case ra:start_cluster(RaConfs) of
+ {ok, _, _} ->
+ %% TODO: handle error - what should be done if the
+ %% config cannot be updated
+ ok = rabbit_fifo_client:update_machine_state(Id,
+ ra_machine_config(NewQ)),
+ %% force a policy change to ensure the latest config is
+ %% updated even when running the machine version from 0
+ rabbit_event:notify(queue_created,
+ [{name, QName},
+ {durable, Durable},
+ {auto_delete, AutoDelete},
+ {arguments, Arguments},
+ {user_who_performed_action,
+ ActingUser}]),
+ {new, NewQ};
+ {error, Error} ->
+ _ = rabbit_amqqueue:internal_delete(QName, ActingUser),
+ {protocol_error, internal_error,
+ "Cannot declare a queue '~s' on node '~s': ~255p",
+ [rabbit_misc:rs(QName), node(), Error]}
+ end;
+ {existing, _} = Ex ->
+ Ex
+ end.
+
+ra_machine(Q) ->
+ {module, rabbit_fifo, ra_machine_config(Q)}.
+
+ra_machine_config(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ {Name, _} = amqqueue:get_pid(Q),
+ %% take the minimum value of the policy and the queue arg if present
+ MaxLength = args_policy_lookup(<<"max-length">>, fun min/2, Q),
+ %% prefer the policy defined strategy if available
+ Overflow = args_policy_lookup(<<"overflow">>, fun (A, _B) -> A end , Q),
+ MaxBytes = args_policy_lookup(<<"max-length-bytes">>, fun min/2, Q),
+ MaxMemoryLength = args_policy_lookup(<<"max-in-memory-length">>, fun min/2, Q),
+ MaxMemoryBytes = args_policy_lookup(<<"max-in-memory-bytes">>, fun min/2, Q),
+ DeliveryLimit = args_policy_lookup(<<"delivery-limit">>, fun min/2, Q),
+ Expires = args_policy_lookup(<<"expires">>,
+ fun (A, _B) -> A end,
+ Q),
+ #{name => Name,
+ queue_resource => QName,
+ dead_letter_handler => dlx_mfa(Q),
+ become_leader_handler => {?MODULE, become_leader, [QName]},
+ max_length => MaxLength,
+ max_bytes => MaxBytes,
+ max_in_memory_length => MaxMemoryLength,
+ max_in_memory_bytes => MaxMemoryBytes,
+ single_active_consumer_on => single_active_consumer_on(Q),
+ delivery_limit => DeliveryLimit,
+ overflow_strategy => overflow(Overflow, drop_head, QName),
+ created => erlang:system_time(millisecond),
+ expires => Expires
+ }.
+
+single_active_consumer_on(Q) ->
+ QArguments = amqqueue:get_arguments(Q),
+ case rabbit_misc:table_lookup(QArguments, <<"x-single-active-consumer">>) of
+ {bool, true} -> true;
+ _ -> false
+ end.
+
+update_consumer_handler(QName, {ConsumerTag, ChPid}, Exclusive, AckRequired, Prefetch, Active, ActivityStatus, Args) ->
+ local_or_remote_handler(ChPid, rabbit_quorum_queue, update_consumer,
+ [QName, ChPid, ConsumerTag, Exclusive, AckRequired, Prefetch, Active, ActivityStatus, Args]).
+
+update_consumer(QName, ChPid, ConsumerTag, Exclusive, AckRequired, Prefetch, Active, ActivityStatus, Args) ->
+ catch rabbit_core_metrics:consumer_updated(ChPid, ConsumerTag, Exclusive, AckRequired,
+ QName, Prefetch, Active, ActivityStatus, Args).
+
+cancel_consumer_handler(QName, {ConsumerTag, ChPid}) ->
+ local_or_remote_handler(ChPid, rabbit_quorum_queue, cancel_consumer,
+ [QName, ChPid, ConsumerTag]).
+
+cancel_consumer(QName, ChPid, ConsumerTag) ->
+ catch rabbit_core_metrics:consumer_deleted(ChPid, ConsumerTag, QName),
+ emit_consumer_deleted(ChPid, ConsumerTag, QName, ?INTERNAL_USER).
+
+local_or_remote_handler(ChPid, Module, Function, Args) ->
+ Node = node(ChPid),
+ case Node == node() of
+ true ->
+ erlang:apply(Module, Function, Args);
+ false ->
+ %% this could potentially block for a while if the node is
+ %% in disconnected state or tcp buffers are full
+ rpc:cast(Node, Module, Function, Args)
+ end.
+
+become_leader(QName, Name) ->
+ Fun = fun (Q1) ->
+ amqqueue:set_state(
+ amqqueue:set_pid(Q1, {Name, node()}),
+ live)
+ end,
+    %% as this function is called synchronously when a ra node becomes leader
+    %% we need to ensure there is no chance of blocking, or else the ra node
+    %% may not be able to establish its leadership
+ spawn(fun() ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ rabbit_amqqueue:update(QName, Fun)
+ end),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q0} when ?is_amqqueue(Q0) ->
+ Nodes = get_nodes(Q0),
+ [rpc:call(Node, ?MODULE, rpc_delete_metrics,
+ [QName], ?RPC_TIMEOUT)
+ || Node <- Nodes, Node =/= node()];
+ _ ->
+ ok
+ end
+ end).
+
+-spec all_replica_states() -> {node(), #{atom() => atom()}}.
+all_replica_states() ->
+ Rows = ets:tab2list(ra_state),
+ {node(), maps:from_list(Rows)}.
+
+-spec list_with_minimum_quorum() -> [amqqueue:amqqueue()].
+list_with_minimum_quorum() ->
+ filter_quorum_critical(
+ rabbit_amqqueue:list_local_quorum_queues()).
+
+-spec list_with_minimum_quorum_for_cli() -> [#{binary() => term()}].
+list_with_minimum_quorum_for_cli() ->
+ QQs = list_with_minimum_quorum(),
+ [begin
+ #resource{name = Name} = amqqueue:get_name(Q),
+ #{
+ <<"readable_name">> => rabbit_data_coercion:to_binary(rabbit_misc:rs(amqqueue:get_name(Q))),
+ <<"name">> => Name,
+ <<"virtual_host">> => amqqueue:get_vhost(Q),
+ <<"type">> => <<"quorum">>
+ }
+ end || Q <- QQs].
+
+-spec filter_quorum_critical([amqqueue:amqqueue()]) -> [amqqueue:amqqueue()].
+filter_quorum_critical(Queues) ->
+ %% Example map of QQ replica states:
+ %% #{rabbit@warp10 =>
+ %% #{'%2F_qq.636' => leader,'%2F_qq.243' => leader,
+ %% '%2F_qq.1939' => leader,'%2F_qq.1150' => leader,
+ %% '%2F_qq.1109' => leader,'%2F_qq.1654' => leader,
+ %% '%2F_qq.1679' => leader,'%2F_qq.1003' => leader,
+ %% '%2F_qq.1593' => leader,'%2F_qq.1765' => leader,
+ %% '%2F_qq.933' => leader,'%2F_qq.38' => leader,
+ %% '%2F_qq.1357' => leader,'%2F_qq.1345' => leader,
+ %% '%2F_qq.1694' => leader,'%2F_qq.994' => leader,
+ %% '%2F_qq.490' => leader,'%2F_qq.1704' => leader,
+ %% '%2F_qq.58' => leader,'%2F_qq.564' => leader,
+ %% '%2F_qq.683' => leader,'%2F_qq.386' => leader,
+ %% '%2F_qq.753' => leader,'%2F_qq.6' => leader,
+ %% '%2F_qq.1590' => leader,'%2F_qq.1363' => leader,
+ %% '%2F_qq.882' => leader,'%2F_qq.1161' => leader,...}}
+ ReplicaStates = maps:from_list(
+ rabbit_misc:append_rpc_all_nodes(rabbit_nodes:all_running(),
+ ?MODULE, all_replica_states, [])),
+ filter_quorum_critical(Queues, ReplicaStates).
+
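+%% A queue is considered quorum-critical when the number of its replicas that
+%% currently report as leader or follower is at or below the minimum quorum
+%% size (length(Members) div 2 + 1).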
+-spec filter_quorum_critical([amqqueue:amqqueue()], #{node() => #{atom() => atom()}}) -> [amqqueue:amqqueue()].
+
+filter_quorum_critical(Queues, ReplicaStates) ->
+ lists:filter(fun (Q) ->
+ MemberNodes = rabbit_amqqueue:get_quorum_nodes(Q),
+ {Name, _Node} = amqqueue:get_pid(Q),
+ AllUp = lists:filter(fun (N) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ case maps:get(N, ReplicaStates, undefined) of
+ #{Name := State} when State =:= follower orelse State =:= leader ->
+ true;
+ _ -> false
+ end
+ end, MemberNodes),
+ MinQuorum = length(MemberNodes) div 2 + 1,
+ length(AllUp) =< MinQuorum
+ end, Queues).
+
+capabilities() ->
+ #{policies => [<<"max-length">>, <<"max-length-bytes">>, <<"overflow">>,
+ <<"expires">>, <<"max-in-memory-length">>, <<"max-in-memory-bytes">>,
+ <<"delivery-limit">>, <<"dead-letter-exchange">>, <<"dead-letter-routing-key">>],
+ queue_arguments => [<<"x-expires">>, <<"x-dead-letter-exchange">>,
+ <<"x-dead-letter-routing-key">>, <<"x-max-length">>,
+ <<"x-max-length-bytes">>, <<"x-max-in-memory-length">>,
+ <<"x-max-in-memory-bytes">>, <<"x-overflow">>,
+ <<"x-single-active-consumer">>, <<"x-queue-type">>,
+ <<"x-quorum-initial-group-size">>, <<"x-delivery-limit">>],
+ consumer_arguments => [<<"x-priority">>, <<"x-credit">>],
+ server_named => false}.
+
+rpc_delete_metrics(QName) ->
+ ets:delete(queue_coarse_metrics, QName),
+ ets:delete(queue_metrics, QName),
+ ok.
+
+spawn_deleter(QName) ->
+ spawn(fun () ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ delete(Q, false, false, <<"expired">>)
+ end).
+
+handle_tick(QName,
+ {Name, MR, MU, M, C, MsgBytesReady, MsgBytesUnack},
+ Nodes) ->
+ %% this makes calls to remote processes so cannot be run inside the
+ %% ra server
+ Self = self(),
+ _ = spawn(fun() ->
+ R = reductions(Name),
+ rabbit_core_metrics:queue_stats(QName, MR, MU, M, R),
+ Util = case C of
+ 0 -> 0;
+ _ -> rabbit_fifo:usage(Name)
+ end,
+ Infos = [{consumers, C},
+ {consumer_utilisation, Util},
+ {message_bytes_ready, MsgBytesReady},
+ {message_bytes_unacknowledged, MsgBytesUnack},
+ {message_bytes, MsgBytesReady + MsgBytesUnack},
+ {message_bytes_persistent, MsgBytesReady + MsgBytesUnack},
+ {messages_persistent, M}
+
+ | infos(QName, ?STATISTICS_KEYS -- [consumers])],
+ rabbit_core_metrics:queue_stats(QName, Infos),
+ rabbit_event:notify(queue_stats,
+ Infos ++ [{name, QName},
+ {messages, M},
+ {messages_ready, MR},
+ {messages_unacknowledged, MU},
+ {reductions, R}]),
+ ok = repair_leader_record(QName, Self),
+ ExpectedNodes = rabbit_mnesia:cluster_nodes(all),
+ case Nodes -- ExpectedNodes of
+ [] ->
+ ok;
+ Stale ->
+ rabbit_log:info("~s: stale nodes detected. Purging ~w~n",
+ [rabbit_misc:rs(QName), Stale]),
+ %% pipeline purge command
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ ok = ra:pipeline_command(amqqueue:get_pid(Q),
+ rabbit_fifo:make_purge_nodes(Stale)),
+
+ ok
+ end
+ end),
+ ok.
+
+repair_leader_record(QName, Self) ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ Node = node(),
+ case amqqueue:get_pid(Q) of
+ {_, Node} ->
+ %% it's ok - we don't need to do anything
+ ok;
+ _ ->
+ rabbit_log:debug("~s: repairing leader record",
+ [rabbit_misc:rs(QName)]),
+ {_, Name} = erlang:process_info(Self, registered_name),
+ become_leader(QName, Name)
+ end,
+ ok.
+
+repair_amqqueue_nodes(VHost, QueueName) ->
+ QName = #resource{virtual_host = VHost, name = QueueName, kind = queue},
+ repair_amqqueue_nodes(QName).
+
+-spec repair_amqqueue_nodes(rabbit_types:r('queue') | amqqueue:amqqueue()) ->
+ ok | repaired.
+repair_amqqueue_nodes(QName = #resource{}) ->
+ {ok, Q0} = rabbit_amqqueue:lookup(QName),
+ repair_amqqueue_nodes(Q0);
+repair_amqqueue_nodes(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ Leader = amqqueue:get_pid(Q0),
+ {ok, Members, _} = ra:members(Leader),
+ RaNodes = [N || {_, N} <- Members],
+ #{nodes := Nodes} = amqqueue:get_type_state(Q0),
+ case lists:sort(RaNodes) =:= lists:sort(Nodes) of
+ true ->
+ %% up to date
+ ok;
+ false ->
+ %% update amqqueue record
+ Fun = fun (Q) ->
+ TS0 = amqqueue:get_type_state(Q),
+ TS = TS0#{nodes => RaNodes},
+ amqqueue:set_type_state(Q, TS)
+ end,
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ rabbit_amqqueue:update(QName, Fun)
+ end),
+ repaired
+ end.
+
+reductions(Name) ->
+ try
+ {reductions, R} = process_info(whereis(Name), reductions),
+ R
+ catch
+ error:badarg ->
+ 0
+ end.
+
+is_recoverable(Q) ->
+ Node = node(),
+ Nodes = get_nodes(Q),
+ lists:member(Node, Nodes).
+
+-spec recover(binary(), [amqqueue:amqqueue()]) ->
+ {[amqqueue:amqqueue()], [amqqueue:amqqueue()]}.
+recover(_Vhost, Queues) ->
+ lists:foldl(
+ fun (Q0, {R0, F0}) ->
+ {Name, _} = amqqueue:get_pid(Q0),
+ QName = amqqueue:get_name(Q0),
+ Nodes = get_nodes(Q0),
+ Formatter = {?MODULE, format_ra_event, [QName]},
+ Res = case ra:restart_server({Name, node()},
+ #{ra_event_formatter => Formatter}) of
+ ok ->
+ % queue was restarted, good
+ ok;
+ {error, Err1}
+ when Err1 == not_started orelse
+ Err1 == name_not_registered ->
+ % queue was never started on this node
+ % so needs to be started from scratch.
+ Machine = ra_machine(Q0),
+ RaNodes = [{Name, Node} || Node <- Nodes],
+ case ra:start_server(Name, {Name, node()}, Machine, RaNodes) of
+ ok -> ok;
+ Err2 ->
+ rabbit_log:warning("recover: quorum queue ~w could not"
+ " be started ~w", [Name, Err2]),
+ fail
+ end;
+ {error, {already_started, _}} ->
+ %% this is fine and can happen if a vhost crashes and performs
+ %% recovery whilst the ra application and servers are still
+ %% running
+ ok;
+ Err ->
+ %% catch all clause to avoid causing the vhost not to start
+ rabbit_log:warning("recover: quorum queue ~w could not be "
+ "restarted ~w", [Name, Err]),
+ fail
+ end,
+ %% we have to ensure the quorum queue is
+ %% present in the rabbit_queue table and not just in
+ %% rabbit_durable_queue
+ %% So many code paths are dependent on this.
+ {ok, Q} = rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Q0),
+ case Res of
+ ok ->
+ {[Q | R0], F0};
+ fail ->
+ {R0, [Q | F0]}
+ end
+ end, {[], []}, Queues).
+
+-spec stop(rabbit_types:vhost()) -> ok.
+stop(VHost) ->
+ _ = [begin
+ Pid = amqqueue:get_pid(Q),
+ ra:stop_server(Pid)
+ end || Q <- find_quorum_queues(VHost)],
+ ok.
+
+-spec delete(amqqueue:amqqueue(),
+ boolean(), boolean(),
+ rabbit_types:username()) ->
+ {ok, QLen :: non_neg_integer()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+delete(Q, true, _IfEmpty, _ActingUser) when ?amqqueue_is_quorum(Q) ->
+ {protocol_error, not_implemented,
+ "cannot delete ~s. queue.delete operations with if-unused flag set are not supported by quorum queues",
+ [rabbit_misc:rs(amqqueue:get_name(Q))]};
+delete(Q, _IfUnused, true, _ActingUser) when ?amqqueue_is_quorum(Q) ->
+ {protocol_error, not_implemented,
+ "cannot delete ~s. queue.delete operations with if-empty flag set are not supported by quorum queues",
+ [rabbit_misc:rs(amqqueue:get_name(Q))]};
+delete(Q, _IfUnused, _IfEmpty, ActingUser) when ?amqqueue_is_quorum(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ QName = amqqueue:get_name(Q),
+ QNodes = get_nodes(Q),
+ %% TODO Quorum queue needs to support consumer tracking for IfUnused
+ Timeout = ?DELETE_TIMEOUT,
+ {ok, ReadyMsgs, _} = stat(Q),
+ Servers = [{Name, Node} || Node <- QNodes],
+ case ra:delete_cluster(Servers, Timeout) of
+ {ok, {_, LeaderNode} = Leader} ->
+ MRef = erlang:monitor(process, Leader),
+ receive
+ {'DOWN', MRef, process, _, _} ->
+ ok
+ after Timeout ->
+ ok = force_delete_queue(Servers)
+ end,
+ ok = delete_queue_data(QName, ActingUser),
+ rpc:call(LeaderNode, rabbit_core_metrics, queue_deleted, [QName],
+ ?RPC_TIMEOUT),
+ {ok, ReadyMsgs};
+ {error, {no_more_servers_to_try, Errs}} ->
+ case lists:all(fun({{error, noproc}, _}) -> true;
+ (_) -> false
+ end, Errs) of
+ true ->
+                    %% If all ra nodes were already down, the delete
+                    %% has succeeded
+ delete_queue_data(QName, ActingUser),
+ {ok, ReadyMsgs};
+ false ->
+ %% attempt forced deletion of all servers
+ rabbit_log:warning(
+ "Could not delete quorum queue '~s', not enough nodes "
+ " online to reach a quorum: ~255p."
+ " Attempting force delete.",
+ [rabbit_misc:rs(QName), Errs]),
+ ok = force_delete_queue(Servers),
+ delete_queue_data(QName, ActingUser),
+ {ok, ReadyMsgs}
+ end
+ end.
+
+force_delete_queue(Servers) ->
+ [begin
+ case catch(ra:force_delete_server(S)) of
+ ok -> ok;
+ Err ->
+ rabbit_log:warning(
+ "Force delete of ~w failed with: ~w"
+ "This may require manual data clean up~n",
+ [S, Err]),
+ ok
+ end
+ end || S <- Servers],
+ ok.
+
+delete_queue_data(QName, ActingUser) ->
+ _ = rabbit_amqqueue:internal_delete(QName, ActingUser),
+ ok.
+
+
+delete_immediately(Resource, {_Name, _} = QPid) ->
+ _ = rabbit_amqqueue:internal_delete(Resource, ?INTERNAL_USER),
+ {ok, _} = ra:delete_cluster([QPid]),
+ rabbit_core_metrics:queue_deleted(Resource),
+ ok.
+
+settle(complete, CTag, MsgIds, QState) ->
+ rabbit_fifo_client:settle(quorum_ctag(CTag), MsgIds, QState);
+settle(requeue, CTag, MsgIds, QState) ->
+ rabbit_fifo_client:return(quorum_ctag(CTag), MsgIds, QState);
+settle(discard, CTag, MsgIds, QState) ->
+ rabbit_fifo_client:discard(quorum_ctag(CTag), MsgIds, QState).
+
+credit(CTag, Credit, Drain, QState) ->
+ rabbit_fifo_client:credit(quorum_ctag(CTag), Credit, Drain, QState).
+
+-spec dequeue(NoAck :: boolean(), pid(),
+ rabbit_types:ctag(), rabbit_fifo_client:state()) ->
+ {empty, rabbit_fifo_client:state()} |
+ {ok, QLen :: non_neg_integer(), qmsg(), rabbit_fifo_client:state()} |
+ {error, term()}.
+dequeue(NoAck, _LimiterPid, CTag0, QState0) ->
+ CTag = quorum_ctag(CTag0),
+ Settlement = case NoAck of
+ true ->
+ settled;
+ false ->
+ unsettled
+ end,
+ rabbit_fifo_client:dequeue(CTag, Settlement, QState0).
+
+-spec consume(amqqueue:amqqueue(),
+ rabbit_queue_type:consume_spec(),
+ rabbit_fifo_client:state()) ->
+ {ok, rabbit_fifo_client:state(), rabbit_queue_type:actions()} |
+ {error, global_qos_not_supported_for_queue_type}.
+consume(Q, #{limiter_active := true}, _State)
+ when ?amqqueue_is_quorum(Q) ->
+ {error, global_qos_not_supported_for_queue_type};
+consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) ->
+ #{no_ack := NoAck,
+ channel_pid := ChPid,
+ prefetch_count := ConsumerPrefetchCount,
+ consumer_tag := ConsumerTag0,
+ exclusive_consume := ExclusiveConsume,
+ args := Args,
+ ok_msg := OkMsg,
+ acting_user := ActingUser} = Spec,
+ %% TODO: validate consumer arguments
+ %% currently quorum queues do not support any arguments
+ QName = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ maybe_send_reply(ChPid, OkMsg),
+ ConsumerTag = quorum_ctag(ConsumerTag0),
+ %% A prefetch count of 0 means no limitation,
+ %% let's make it into something large for ra
+ Prefetch0 = case ConsumerPrefetchCount of
+ 0 -> 2000;
+ Other -> Other
+ end,
+ %% consumer info is used to describe the consumer properties
+ AckRequired = not NoAck,
+ ConsumerMeta = #{ack => AckRequired,
+ prefetch => ConsumerPrefetchCount,
+ args => Args,
+ username => ActingUser},
+
+ {CreditMode, Credit, Drain} = parse_credit_args(Prefetch0, Args),
+ %% if the mode is credited we should send a separate credit command
+ %% after checkout and give 0 credits initially
+ Prefetch = case CreditMode of
+ credited -> 0;
+ simple_prefetch -> Prefetch0
+ end,
+ {ok, QState1} = rabbit_fifo_client:checkout(ConsumerTag, Prefetch,
+ CreditMode, ConsumerMeta,
+ QState0),
+ QState = case CreditMode of
+ credited when Credit > 0 ->
+ rabbit_fifo_client:credit(ConsumerTag, Credit, Drain,
+ QState1);
+ _ -> QState1
+ end,
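+ %% Query the leader for the current single active consumer so the consumer
+ %% can be registered with the right activity status: 'up' when the feature
+ %% is off, 'single_active' when this consumer is the active one, and
+ %% 'waiting' otherwise.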
+ case ra:local_query(QPid,
+ fun rabbit_fifo:query_single_active_consumer/1) of
+ {ok, {_, SacResult}, _} ->
+ SingleActiveConsumerOn = single_active_consumer_on(Q),
+ {IsSingleActiveConsumer, ActivityStatus} = case {SingleActiveConsumerOn, SacResult} of
+ {false, _} ->
+ {true, up};
+ {true, {value, {ConsumerTag, ChPid}}} ->
+ {true, single_active};
+ _ ->
+ {false, waiting}
+ end,
+ rabbit_core_metrics:consumer_created(
+ ChPid, ConsumerTag, ExclusiveConsume,
+ AckRequired, QName,
+ ConsumerPrefetchCount, IsSingleActiveConsumer,
+ ActivityStatus, Args),
+ emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume,
+ AckRequired, QName, Prefetch,
+ Args, none, ActingUser),
+ {ok, QState, []};
+ {error, Error} ->
+ Error;
+ {timeout, _} ->
+ {error, timeout}
+ end.
+
+% -spec basic_cancel(rabbit_types:ctag(), ChPid :: pid(), any(), rabbit_fifo_client:state()) ->
+% {'ok', rabbit_fifo_client:state()}.
+
+cancel(_Q, ConsumerTag, OkMsg, _ActingUser, State) ->
+ maybe_send_reply(self(), OkMsg),
+ rabbit_fifo_client:cancel_checkout(quorum_ctag(ConsumerTag), State).
+
+emit_consumer_created(ChPid, CTag, Exclusive, AckRequired, QName, PrefetchCount, Args, Ref, ActingUser) ->
+ rabbit_event:notify(consumer_created,
+ [{consumer_tag, CTag},
+ {exclusive, Exclusive},
+ {ack_required, AckRequired},
+ {channel, ChPid},
+ {queue, QName},
+ {prefetch_count, PrefetchCount},
+ {arguments, Args},
+ {user_who_performed_action, ActingUser}],
+ Ref).
+
+emit_consumer_deleted(ChPid, ConsumerTag, QName, ActingUser) ->
+ rabbit_event:notify(consumer_deleted,
+ [{consumer_tag, ConsumerTag},
+ {channel, ChPid},
+ {queue, QName},
+ {user_who_performed_action, ActingUser}]).
+
+-spec stateless_deliver(amqqueue:ra_server_id(), rabbit_types:delivery()) -> 'ok'.
+
+stateless_deliver(ServerId, Delivery) ->
+ ok = rabbit_fifo_client:untracked_enqueue([ServerId],
+ Delivery#delivery.message).
+
+-spec deliver(Confirm :: boolean(), rabbit_types:delivery(),
+ rabbit_fifo_client:state()) ->
+ {ok | slow, rabbit_fifo_client:state()} |
+ {reject_publish, rabbit_fifo_client:state()}.
+deliver(false, Delivery, QState0) ->
+ case rabbit_fifo_client:enqueue(Delivery#delivery.message, QState0) of
+ {ok, _} = Res -> Res;
+ {slow, _} = Res -> Res;
+ {reject_publish, State} ->
+ {ok, State}
+ end;
+deliver(true, Delivery, QState0) ->
+ rabbit_fifo_client:enqueue(Delivery#delivery.msg_seq_no,
+ Delivery#delivery.message, QState0).
+
+deliver(QSs, #delivery{confirm = Confirm} = Delivery) ->
+ lists:foldl(
+ fun({Q, stateless}, {Qs, Actions}) ->
+ QRef = amqqueue:get_pid(Q),
+ ok = rabbit_fifo_client:untracked_enqueue(
+ [QRef], Delivery#delivery.message),
+ {Qs, Actions};
+ ({Q, S0}, {Qs, Actions}) ->
+ case deliver(Confirm, Delivery, S0) of
+ {reject_publish, S} ->
+ Seq = Delivery#delivery.msg_seq_no,
+ QName = rabbit_fifo_client:cluster_name(S),
+ {[{Q, S} | Qs], [{rejected, QName, [Seq]} | Actions]};
+ {_, S} ->
+ {[{Q, S} | Qs], Actions}
+ end
+ end, {[], []}, QSs).
+
+
+state_info(S) ->
+ #{pending_raft_commands => rabbit_fifo_client:pending_size(S)}.
+
+
+
+-spec infos(rabbit_types:r('queue')) -> rabbit_types:infos().
+infos(QName) ->
+ infos(QName, ?STATISTICS_KEYS).
+
+infos(QName, Keys) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ info(Q, Keys);
+ {error, not_found} ->
+ []
+ end.
+
+info(Q, all_keys) ->
+ info(Q, ?INFO_KEYS);
+info(Q, Items) ->
+ lists:foldr(fun(totals, Acc) ->
+ i_totals(Q) ++ Acc;
+ (type_specific, Acc) ->
+ format(Q) ++ Acc;
+ (Item, Acc) ->
+ [{Item, i(Item, Q)} | Acc]
+ end, [], Items).
+
+-spec stat(amqqueue:amqqueue()) ->
+ {'ok', non_neg_integer(), non_neg_integer()}.
+stat(Q) when ?is_amqqueue(Q) ->
+ %% same short default timeout as in rabbit_fifo_client:stat/1
+ stat(Q, 250).
+
+-spec stat(amqqueue:amqqueue(), non_neg_integer()) -> {'ok', non_neg_integer(), non_neg_integer()}.
+
+stat(Q, Timeout) when ?is_amqqueue(Q) ->
+ Leader = amqqueue:get_pid(Q),
+ try
+ case rabbit_fifo_client:stat(Leader, Timeout) of
+ {ok, _, _} = Success -> Success;
+ {error, _} -> {ok, 0, 0};
+ {timeout, _} -> {ok, 0, 0}
+ end
+ catch
+ _:_ ->
+ %% Leader is not available, cluster might be in minority
+ {ok, 0, 0}
+ end.
+
+-spec purge(amqqueue:amqqueue()) ->
+ {ok, non_neg_integer()}.
+purge(Q) when ?is_amqqueue(Q) ->
+ Node = amqqueue:get_pid(Q),
+ rabbit_fifo_client:purge(Node).
+
+requeue(ConsumerTag, MsgIds, QState) ->
+ rabbit_fifo_client:return(quorum_ctag(ConsumerTag), MsgIds, QState).
+
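+%% Remove on-disk data for Ra servers registered on this node that no longer
+%% correspond to a quorum queue member here and are not one of the known
+%% non-quorum-queue Ra clusters (see rabbit_ra_registry).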
+cleanup_data_dir() ->
+ Names = [begin
+ {Name, _} = amqqueue:get_pid(Q),
+ Name
+ end
+ || Q <- rabbit_amqqueue:list_by_type(?MODULE),
+ lists:member(node(), get_nodes(Q))],
+ NoQQClusters = rabbit_ra_registry:list_not_quorum_clusters(),
+ Registered = ra_directory:list_registered(),
+ Running = Names ++ NoQQClusters,
+ _ = [maybe_delete_data_dir(UId) || {Name, UId} <- Registered,
+ not lists:member(Name, Running)],
+ ok.
+
+maybe_delete_data_dir(UId) ->
+ Dir = ra_env:server_data_dir(UId),
+ {ok, Config} = ra_log:read_config(Dir),
+ case maps:get(machine, Config) of
+ {module, rabbit_fifo, _} ->
+ ra_lib:recursive_delete(Dir),
+ ra_directory:unregister_name(UId);
+ _ ->
+ ok
+ end.
+
+policy_changed(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ _ = rabbit_fifo_client:update_machine_state(QPid, ra_machine_config(Q)),
+ ok.
+
+-spec cluster_state(Name :: atom()) -> 'down' | 'recovering' | 'running'.
+
+cluster_state(Name) ->
+ case whereis(Name) of
+ undefined -> down;
+ _ ->
+ case ets:lookup(ra_state, Name) of
+ [{_, recover}] -> recovering;
+ _ -> running
+ end
+ end.
+
+-spec status(rabbit_types:vhost(), Name :: rabbit_misc:resource_name()) ->
+ [[{binary(), term()}]] | {error, term()}.
+status(Vhost, QueueName) ->
+ %% Handle not found queues
+ QName = #resource{virtual_host = Vhost, name = QueueName, kind = queue},
+ RName = qname_to_internal_name(QName),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ Nodes = get_nodes(Q),
+ [begin
+ case get_sys_status({RName, N}) of
+ {ok, Sys} ->
+ {_, M} = lists:keyfind(ra_server_state, 1, Sys),
+ {_, RaftState} = lists:keyfind(raft_state, 1, Sys),
+ #{commit_index := Commit,
+ machine_version := MacVer,
+ current_term := Term,
+ log := #{last_index := Last,
+ snapshot_index := SnapIdx}} = M,
+ [{<<"Node Name">>, N},
+ {<<"Raft State">>, RaftState},
+ {<<"Log Index">>, Last},
+ {<<"Commit Index">>, Commit},
+ {<<"Snapshot Index">>, SnapIdx},
+ {<<"Term">>, Term},
+ {<<"Machine Version">>, MacVer}
+ ];
+ {error, Err} ->
+ [{<<"Node Name">>, N},
+ {<<"Raft State">>, Err},
+ {<<"Log Index">>, <<>>},
+ {<<"Commit Index">>, <<>>},
+ {<<"Snapshot Index">>, <<>>},
+ {<<"Term">>, <<>>},
+ {<<"Machine Version">>, <<>>}
+ ]
+ end
+ end || N <- Nodes];
+ {error, not_found} = E ->
+ E
+ end.
+
+get_sys_status(Proc) ->
+ try lists:nth(5, element(4, sys:get_status(Proc))) of
+ Sys -> {ok, Sys}
+ catch
+ _:Err when is_tuple(Err) ->
+ {error, element(1, Err)};
+ _:_ ->
+ {error, other}
+
+ end.
+
+
+add_member(VHost, Name, Node, Timeout) ->
+ QName = #resource{virtual_host = VHost, name = Name, kind = queue},
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ QNodes = get_nodes(Q),
+ case lists:member(Node, rabbit_nodes:all_running()) of
+ false ->
+ {error, node_not_running};
+ true ->
+ case lists:member(Node, QNodes) of
+ true ->
+ %% idempotent by design
+ ok;
+ false ->
+ add_member(Q, Node, Timeout)
+ end
+ end;
+ {error, not_found} = E ->
+ E
+ end.
+
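+%% Start a new Ra server for the queue on Node, ask the existing members to
+%% add it to the cluster and record the new node (and leader) in the queue
+%% record. If either step fails, the freshly started server is force-deleted
+%% so no orphaned member is left behind.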
+add_member(Q, Node, Timeout) when ?amqqueue_is_quorum(Q) ->
+ {RaName, _} = amqqueue:get_pid(Q),
+ QName = amqqueue:get_name(Q),
+ %% TODO parallel calls might crash this, or add a duplicate in quorum_nodes
+ ServerId = {RaName, Node},
+ Members = members(Q),
+ TickTimeout = application:get_env(rabbit, quorum_tick_interval,
+ ?TICK_TIMEOUT),
+ Conf = make_ra_conf(Q, ServerId, TickTimeout),
+ case ra:start_server(Conf) of
+ ok ->
+ case ra:add_member(Members, ServerId, Timeout) of
+ {ok, _, Leader} ->
+ Fun = fun(Q1) ->
+ Q2 = update_type_state(
+ Q1, fun(#{nodes := Nodes} = Ts) ->
+ Ts#{nodes => [Node | Nodes]}
+ end),
+ amqqueue:set_pid(Q2, Leader)
+ end,
+ rabbit_misc:execute_mnesia_transaction(
+ fun() -> rabbit_amqqueue:update(QName, Fun) end),
+ ok;
+ {timeout, _} ->
+ _ = ra:force_delete_server(ServerId),
+ _ = ra:remove_member(Members, ServerId),
+ {error, timeout};
+ E ->
+ _ = ra:force_delete_server(ServerId),
+ E
+ end;
+ E ->
+ E
+ end.
+
+delete_member(VHost, Name, Node) ->
+ QName = #resource{virtual_host = VHost, name = Name, kind = queue},
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ QNodes = get_nodes(Q),
+ case lists:member(Node, QNodes) of
+ false ->
+ %% idempotent by design
+ ok;
+ true ->
+ delete_member(Q, Node)
+ end;
+ {error, not_found} = E ->
+ E
+ end.
+
+
+delete_member(Q, Node) when ?amqqueue_is_quorum(Q) ->
+ QName = amqqueue:get_name(Q),
+ {RaName, _} = amqqueue:get_pid(Q),
+ ServerId = {RaName, Node},
+ case members(Q) of
+ [{_, Node}] ->
+ %% deleting the last member is not allowed
+ {error, last_node};
+ Members ->
+ case ra:remove_member(Members, ServerId) of
+ {ok, _, _Leader} ->
+ Fun = fun(Q1) ->
+ update_type_state(
+ Q1,
+ fun(#{nodes := Nodes} = Ts) ->
+ Ts#{nodes => lists:delete(Node, Nodes)}
+ end)
+ end,
+ rabbit_misc:execute_mnesia_transaction(
+ fun() -> rabbit_amqqueue:update(QName, Fun) end),
+ case ra:force_delete_server(ServerId) of
+ ok ->
+ ok;
+ {error, {badrpc, nodedown}} ->
+ ok;
+ {error, {badrpc, {'EXIT', {badarg, _}}}} ->
+ %% DETS/ETS tables can't be found, application isn't running
+ ok;
+ {error, _} = Err ->
+ Err;
+ Err ->
+ {error, Err}
+ end;
+ {timeout, _} ->
+ {error, timeout};
+ E ->
+ E
+ end
+ end.
+
+-spec shrink_all(node()) ->
+ [{rabbit_amqqueue:name(),
+ {ok, pos_integer()} | {error, pos_integer(), term()}}].
+shrink_all(Node) ->
+ [begin
+ QName = amqqueue:get_name(Q),
+ rabbit_log:info("~s: removing member (replica) on node ~w",
+ [rabbit_misc:rs(QName), Node]),
+ Size = length(get_nodes(Q)),
+ case delete_member(Q, Node) of
+ ok ->
+ {QName, {ok, Size-1}};
+ {error, Err} ->
+ rabbit_log:warning("~s: failed to remove member (replica) on node ~w, error: ~w",
+ [rabbit_misc:rs(QName), Node, Err]),
+ {QName, {error, Size, Err}}
+ end
+ end || Q <- rabbit_amqqueue:list(),
+ amqqueue:get_type(Q) == ?MODULE,
+ lists:member(Node, get_nodes(Q))].
+
+-spec grow(node(), binary(), binary(), all | even) ->
+ [{rabbit_amqqueue:name(),
+ {ok, pos_integer()} | {error, pos_integer(), term()}}].
+grow(Node, VhostSpec, QueueSpec, Strategy) ->
+ Running = rabbit_nodes:all_running(),
+ [begin
+ Size = length(get_nodes(Q)),
+ QName = amqqueue:get_name(Q),
+ rabbit_log:info("~s: adding a new member (replica) on node ~w",
+ [rabbit_misc:rs(QName), Node]),
+ case add_member(Q, Node, ?ADD_MEMBER_TIMEOUT) of
+ ok ->
+ {QName, {ok, Size + 1}};
+ {error, Err} ->
+ rabbit_log:warning(
+ "~s: failed to add member (replica) on node ~w, error: ~w",
+ [rabbit_misc:rs(QName), Node, Err]),
+ {QName, {error, Size, Err}}
+ end
+ end
+ || Q <- rabbit_amqqueue:list(),
+ amqqueue:get_type(Q) == ?MODULE,
+ %% don't add a member if there is already one on the node
+ not lists:member(Node, get_nodes(Q)),
+ %% node needs to be running
+ lists:member(Node, Running),
+ matches_strategy(Strategy, get_nodes(Q)),
+ is_match(amqqueue:get_vhost(Q), VhostSpec) andalso
+ is_match(get_resource_name(amqqueue:get_name(Q)), QueueSpec) ].
+
+transfer_leadership(Q, Destination) ->
+ {RaName, _} = Pid = amqqueue:get_pid(Q),
+ case ra:transfer_leadership(Pid, {RaName, Destination}) of
+ ok ->
+ case ra:members(Pid) of
+ {_, _, {_, NewNode}} ->
+ {migrated, NewNode};
+ {timeout, _} ->
+ {not_migrated, ra_members_timeout}
+ end;
+ already_leader ->
+ {not_migrated, already_leader};
+ {error, Reason} ->
+ {not_migrated, Reason};
+ {timeout, _} ->
+ %% TODO should we retry once?
+ {not_migrated, timeout}
+ end.
+
+queue_length(Q) ->
+ Name = amqqueue:get_name(Q),
+ case ets:lookup(ra_metrics, Name) of
+ [] -> 0;
+ [{_, _, SnapIdx, _, _, LastIdx, _}] -> LastIdx - SnapIdx
+ end.
+
+get_replicas(Q) ->
+ get_nodes(Q).
+
+get_resource_name(#resource{name = Name}) ->
+ Name.
+
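+%% 'all' grows every matching queue; 'even' only grows queues that currently
+%% have an even number of members (e.g. bringing a 2-replica queue back to 3).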
+matches_strategy(all, _) -> true;
+matches_strategy(even, Members) ->
+ length(Members) rem 2 == 0.
+
+is_match(Subj, E) ->
+ nomatch /= re:run(Subj, E).
+
+file_handle_leader_reservation(QName) ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ ClusterSize = length(get_nodes(Q)),
+ file_handle_cache:set_reservation(2 + ClusterSize).
+
+file_handle_other_reservation() ->
+ file_handle_cache:set_reservation(2).
+
+file_handle_release_reservation() ->
+ file_handle_cache:release_reservation().
+
+-spec reclaim_memory(rabbit_types:vhost(), Name :: rabbit_misc:resource_name()) -> ok | {error, term()}.
+reclaim_memory(Vhost, QueueName) ->
+ QName = #resource{virtual_host = Vhost, name = QueueName, kind = queue},
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ ok = ra:pipeline_command(amqqueue:get_pid(Q),
+ rabbit_fifo:make_garbage_collection());
+ {error, not_found} = E ->
+ E
+ end.
+
+%%----------------------------------------------------------------------------
+dlx_mfa(Q) ->
+ DLX = init_dlx(args_policy_lookup(<<"dead-letter-exchange">>,
+ fun res_arg/2, Q), Q),
+ DLXRKey = args_policy_lookup(<<"dead-letter-routing-key">>,
+ fun res_arg/2, Q),
+ {?MODULE, dead_letter_publish, [DLX, DLXRKey, amqqueue:get_name(Q)]}.
+
+init_dlx(undefined, _Q) ->
+ undefined;
+init_dlx(DLX, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ rabbit_misc:r(QName, exchange, DLX).
+
+res_arg(_PolVal, ArgVal) -> ArgVal.
+
+dead_letter_publish(undefined, _, _, _) ->
+ ok;
+dead_letter_publish(X, RK, QName, ReasonMsgs) ->
+ case rabbit_exchange:lookup(X) of
+ {ok, Exchange} ->
+ [rabbit_dead_letter:publish(Msg, Reason, Exchange, RK, QName)
+ || {Reason, Msg} <- ReasonMsgs];
+ {error, not_found} ->
+ ok
+ end.
+
+find_quorum_queues(VHost) ->
+ Node = node(),
+ mnesia:async_dirty(
+ fun () ->
+ qlc:e(qlc:q([Q || Q <- mnesia:table(rabbit_durable_queue),
+ ?amqqueue_is_quorum(Q),
+ amqqueue:get_vhost(Q) =:= VHost,
+ amqqueue:qnode(Q) == Node]))
+ end).
+
+i_totals(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, MR, MU, M, _}] ->
+ [{messages_ready, MR},
+ {messages_unacknowledged, MU},
+ {messages, M}];
+ [] ->
+ [{messages_ready, 0},
+ {messages_unacknowledged, 0},
+ {messages, 0}]
+ end.
+
+i(name, Q) when ?is_amqqueue(Q) -> amqqueue:get_name(Q);
+i(durable, Q) when ?is_amqqueue(Q) -> amqqueue:is_durable(Q);
+i(auto_delete, Q) when ?is_amqqueue(Q) -> amqqueue:is_auto_delete(Q);
+i(arguments, Q) when ?is_amqqueue(Q) -> amqqueue:get_arguments(Q);
+i(pid, Q) when ?is_amqqueue(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ whereis(Name);
+i(messages, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ quorum_messages(QName);
+i(messages_ready, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, MR, _, _, _}] ->
+ MR;
+ [] ->
+ 0
+ end;
+i(messages_unacknowledged, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, _, MU, _, _}] ->
+ MU;
+ [] ->
+ 0
+ end;
+i(policy, Q) ->
+ case rabbit_policy:name(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(operator_policy, Q) ->
+ case rabbit_policy:name_op(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(effective_policy_definition, Q) ->
+ case rabbit_policy:effective_definition(Q) of
+ undefined -> [];
+ Def -> Def
+ end;
+i(consumers, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_metrics, QName) of
+ [{_, M, _}] ->
+ proplists:get_value(consumers, M, 0);
+ [] ->
+ 0
+ end;
+i(memory, Q) when ?is_amqqueue(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ try
+ {memory, M} = process_info(whereis(Name), memory),
+ M
+ catch
+ error:badarg ->
+ 0
+ end;
+i(state, Q) when ?is_amqqueue(Q) ->
+ {Name, Node} = amqqueue:get_pid(Q),
+ %% Check against the leader or last known leader
+ case rpc:call(Node, ?MODULE, cluster_state, [Name], ?RPC_TIMEOUT) of
+ {badrpc, _} -> down;
+ State -> State
+ end;
+i(local_state, Q) when ?is_amqqueue(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ case ets:lookup(ra_state, Name) of
+ [{_, State}] -> State;
+ _ -> not_member
+ end;
+i(garbage_collection, Q) when ?is_amqqueue(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ try
+ rabbit_misc:get_gc_info(whereis(Name))
+ catch
+ error:badarg ->
+ []
+ end;
+i(members, Q) when ?is_amqqueue(Q) ->
+ get_nodes(Q);
+i(online, Q) -> online(Q);
+i(leader, Q) -> leader(Q);
+i(open_files, Q) when ?is_amqqueue(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ Nodes = get_nodes(Q),
+ {Data, _} = rpc:multicall(Nodes, ?MODULE, open_files, [Name]),
+ lists:flatten(Data);
+i(single_active_consumer_pid, Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case ra:local_query(QPid, fun rabbit_fifo:query_single_active_consumer/1) of
+ {ok, {_, {value, {_ConsumerTag, ChPid}}}, _} ->
+ ChPid;
+ {ok, _, _} ->
+ '';
+ {error, _} ->
+ '';
+ {timeout, _} ->
+ ''
+ end;
+i(single_active_consumer_ctag, Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case ra:local_query(QPid,
+ fun rabbit_fifo:query_single_active_consumer/1) of
+ {ok, {_, {value, {ConsumerTag, _ChPid}}}, _} ->
+ ConsumerTag;
+ {ok, _, _} ->
+ '';
+ {error, _} ->
+ '';
+ {timeout, _} ->
+ ''
+ end;
+i(type, _) -> quorum;
+i(messages_ram, Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case ra:local_query(QPid,
+ fun rabbit_fifo:query_in_memory_usage/1) of
+ {ok, {_, {Length, _}}, _} ->
+ Length;
+ {error, _} ->
+ 0;
+ {timeout, _} ->
+ 0
+ end;
+i(message_bytes_ram, Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case ra:local_query(QPid,
+ fun rabbit_fifo:query_in_memory_usage/1) of
+ {ok, {_, {_, Bytes}}, _} ->
+ Bytes;
+ {error, _} ->
+ 0;
+ {timeout, _} ->
+ 0
+ end;
+i(_K, _Q) -> ''.
+
+open_files(Name) ->
+ case whereis(Name) of
+ undefined -> {node(), 0};
+ Pid -> case ets:lookup(ra_open_file_metrics, Pid) of
+ [] -> {node(), 0};
+ [{_, Count}] -> {node(), Count}
+ end
+ end.
+
+leader(Q) when ?is_amqqueue(Q) ->
+ {Name, Leader} = amqqueue:get_pid(Q),
+ case is_process_alive(Name, Leader) of
+ true -> Leader;
+ false -> ''
+ end.
+
+peek(Vhost, Queue, Pos) ->
+ peek(Pos, rabbit_misc:r(Vhost, queue, Queue)).
+
+peek(Pos, #resource{} = QName) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ peek(Pos, Q);
+ Err ->
+ Err
+ end;
+peek(Pos, Q) when ?is_amqqueue(Q) andalso ?amqqueue_is_quorum(Q) ->
+ LeaderPid = amqqueue:get_pid(Q),
+ case ra:aux_command(LeaderPid, {peek, Pos}) of
+ {ok, {MsgHeader, Msg0}} ->
+ Count = case MsgHeader of
+ #{delivery_count := C} -> C;
+ _ -> 0
+ end,
+ Msg = rabbit_basic:add_header(<<"x-delivery-count">>, long,
+ Count, Msg0),
+ {ok, rabbit_basic:peek_fmt_message(Msg)};
+ {error, Err} ->
+ {error, Err};
+ Err ->
+ Err
+ end;
+peek(_Pos, Q) when ?is_amqqueue(Q) andalso ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported}.
+
+online(Q) when ?is_amqqueue(Q) ->
+ Nodes = get_nodes(Q),
+ {Name, _} = amqqueue:get_pid(Q),
+ [Node || Node <- Nodes, is_process_alive(Name, Node)].
+
+format(Q) when ?is_amqqueue(Q) ->
+ Nodes = get_nodes(Q),
+ [{members, Nodes}, {online, online(Q)}, {leader, leader(Q)}].
+
+is_process_alive(Name, Node) ->
+ erlang:is_pid(rpc:call(Node, erlang, whereis, [Name], ?RPC_TIMEOUT)).
+
+-spec quorum_messages(rabbit_amqqueue:name()) -> non_neg_integer().
+
+quorum_messages(QName) ->
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, _, _, M, _}] ->
+ M;
+ [] ->
+ 0
+ end.
+
+quorum_ctag(Int) when is_integer(Int) ->
+ integer_to_binary(Int);
+quorum_ctag(Other) ->
+ Other.
+
+maybe_send_reply(_ChPid, undefined) -> ok;
+maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg).
+
+queue_name(RaFifoState) ->
+ rabbit_fifo_client:cluster_name(RaFifoState).
+
+get_default_quorum_initial_group_size(Arguments) ->
+ case rabbit_misc:table_lookup(Arguments, <<"x-quorum-initial-group-size">>) of
+ undefined -> application:get_env(rabbit, default_quorum_initial_group_size);
+ {_Type, Val} -> Val
+ end.
+
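+%% Pick the initial set of member nodes: the local node is always included
+%% when it is a candidate, and the remaining members are chosen at random.
+%% For illustration, with Size = 3 and five candidate nodes including node(),
+%% this returns node() plus two other randomly selected nodes.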
+select_quorum_nodes(Size, All) when length(All) =< Size ->
+ All;
+select_quorum_nodes(Size, All) ->
+ Node = node(),
+ case lists:member(Node, All) of
+ true ->
+ select_quorum_nodes(Size - 1, lists:delete(Node, All), [Node]);
+ false ->
+ select_quorum_nodes(Size, All, [])
+ end.
+
+select_quorum_nodes(0, _, Selected) ->
+ Selected;
+select_quorum_nodes(Size, Rest, Selected) ->
+ S = lists:nth(rand:uniform(length(Rest)), Rest),
+ select_quorum_nodes(Size - 1, lists:delete(S, Rest), [S | Selected]).
+
+%% members, with the current leader first
+members(Q) when ?amqqueue_is_quorum(Q) ->
+ {RaName, LeaderNode} = amqqueue:get_pid(Q),
+ Nodes = lists:delete(LeaderNode, get_nodes(Q)),
+ [{RaName, N} || N <- [LeaderNode | Nodes]].
+
+format_ra_event(ServerId, Evt, QRef) ->
+ {'$gen_cast', {queue_event, QRef, {ServerId, Evt}}}.
+
+make_ra_conf(Q, ServerId, TickTimeout) ->
+ QName = amqqueue:get_name(Q),
+ RaMachine = ra_machine(Q),
+ [{ClusterName, _} | _] = Members = members(Q),
+ UId = ra:new_uid(ra_lib:to_binary(ClusterName)),
+ FName = rabbit_misc:rs(QName),
+ Formatter = {?MODULE, format_ra_event, [QName]},
+ #{cluster_name => ClusterName,
+ id => ServerId,
+ uid => UId,
+ friendly_name => FName,
+ metrics_key => QName,
+ initial_members => Members,
+ log_init_args => #{uid => UId},
+ tick_timeout => TickTimeout,
+ machine => RaMachine,
+ ra_event_formatter => Formatter}.
+
+get_nodes(Q) when ?is_amqqueue(Q) ->
+ #{nodes := Nodes} = amqqueue:get_type_state(Q),
+ Nodes.
+
+update_type_state(Q, Fun) when ?is_amqqueue(Q) ->
+ Ts = amqqueue:get_type_state(Q),
+ amqqueue:set_type_state(Q, Fun(Ts)).
+
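+%% Map the configured overflow behaviour to the terms rabbit_fifo understands;
+%% 'reject-publish-dlx' is not supported by quorum queues, so the default is
+%% used instead (with a warning).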
+overflow(undefined, Def, _QName) -> Def;
+overflow(<<"reject-publish">>, _Def, _QName) -> reject_publish;
+overflow(<<"drop-head">>, _Def, _QName) -> drop_head;
+overflow(<<"reject-publish-dlx">> = V, Def, QName) ->
+ rabbit_log:warning("Invalid overflow strategy ~p for quorum queue: ~p",
+ [V, rabbit_misc:rs(QName)]),
+ Def.
+
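+%% Consumers may opt into credit-based flow control via the x-credit consumer
+%% argument, e.g. (illustrative values):
+%%   {<<"x-credit">>, table, [{<<"credit">>, long, 0}, {<<"drain">>, bool, false}]}
+%% in which case {credited, Credit, Drain} is returned; otherwise simple
+%% prefetch with the given default is used.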
+parse_credit_args(Default, Args) ->
+ case rabbit_misc:table_lookup(Args, <<"x-credit">>) of
+ {table, T} ->
+ case {rabbit_misc:table_lookup(T, <<"credit">>),
+ rabbit_misc:table_lookup(T, <<"drain">>)} of
+ {{long, C}, {bool, D}} ->
+ {credited, C, D};
+ _ ->
+ {simple_prefetch, Default, false}
+ end;
+ undefined ->
+ {simple_prefetch, Default, false}
+ end.
diff --git a/deps/rabbit/src/rabbit_ra_registry.erl b/deps/rabbit/src/rabbit_ra_registry.erl
new file mode 100644
index 0000000000..b02d89eda5
--- /dev/null
+++ b/deps/rabbit/src/rabbit_ra_registry.erl
@@ -0,0 +1,25 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_ra_registry).
+
+-export([list_not_quorum_clusters/0]).
+
+%% Not all ra clusters are quorum queues. We need to keep a list of the
+%% non-quorum-queue clusters so we don't take them into account in operations
+%% such as memory calculation and data cleanup. Hardcoded for now.
+list_not_quorum_clusters() ->
+ [rabbit_stream_coordinator].
diff --git a/deps/rabbit/src/rabbit_reader.erl b/deps/rabbit/src/rabbit_reader.erl
new file mode 100644
index 0000000000..c91dbbc105
--- /dev/null
+++ b/deps/rabbit/src/rabbit_reader.erl
@@ -0,0 +1,1803 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_reader).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+%% This is an AMQP 0-9-1 connection implementation. If the AMQP 1.0 plugin is enabled,
+%% this module passes control of incoming AMQP 1.0 connections to it.
+%%
+%% Every connection (as in, a process using this module)
+%% is a controlling process for a server socket.
+%%
+%% Connections have a number of responsibilities:
+%%
+%% * Performing protocol handshake
+%% * Parsing incoming data and dispatching protocol methods
+%% * Authenticating clients (with the help of authentication backends)
+%% * Enforcing TCP backpressure (throttling clients)
+%% * Enforcing connection limits, e.g. channel_max
+%% * Channel management
+%% * Setting up heartbeater and alarm notifications
+%% * Emitting connection and network activity metric events
+%% * Gracefully handling client disconnects, channel termination, etc
+%%
+%% and a few more.
+%%
+%% Every connection has
+%%
+%% * a queue collector which is responsible for keeping
+%% track of exclusive queues on the connection and their cleanup.
+%% * a heartbeater that's responsible for sending heartbeat frames to clients,
+%% keeping track of the incoming ones and notifying connection about
+%% heartbeat timeouts
+%% * Stats timer, a timer that is used to periodically emit metric events
+%%
+%% Some dependencies are started under a separate supervisor to avoid deadlocks
+%% during system shutdown. See rabbit_channel_sup:start_link/0 for details.
+%%
+%% Reader processes are special processes (in the OTP sense).
+
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([start_link/2, info_keys/0, info/1, info/2, force_event_refresh/2,
+ shutdown/2]).
+
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+
+-export([init/3, mainloop/4, recvloop/4]).
+
+-export([conserve_resources/3, server_properties/1]).
+
+-define(NORMAL_TIMEOUT, 3).
+-define(CLOSING_TIMEOUT, 30).
+-define(CHANNEL_TERMINATION_TIMEOUT, 3).
+%% we wait for this many seconds before closing the TCP connection
+%% with a client that failed to log in. Provides some relief
+%% from connection storms and DoS.
+-define(SILENT_CLOSE_DELAY, 3).
+-define(CHANNEL_MIN, 1).
+
+%%--------------------------------------------------------------------------
+
+-record(v1, {
+ %% parent process
+ parent,
+ %% socket
+ sock,
+ %% connection state, see connection record
+ connection,
+ callback,
+ recv_len,
+ pending_recv,
+ %% pre_init | securing | running | blocking | blocked | closing | closed | {become, F}
+ connection_state,
+ %% see comment in rabbit_connection_sup:start_link/0
+ helper_sup,
+ %% takes care of cleaning up exclusive queues,
+ %% see rabbit_queue_collector
+ queue_collector,
+ %% sends and receives heartbeat frames,
+ %% see rabbit_heartbeat
+ heartbeater,
+ %% timer used to emit statistics
+ stats_timer,
+ %% channel supervisor
+ channel_sup_sup_pid,
+ %% how many channels this connection has
+ channel_count,
+ %% throttling state, for both
+ %% credit- and resource-driven flow control
+ throttle,
+ proxy_socket}).
+
+-record(throttle, {
+ %% never | timestamp()
+ last_blocked_at,
+ %% a set of the reasons why we are
+ %% blocked: {resource, memory}, {resource, disk}.
+ %% More reasons can be added in the future.
+ blocked_by,
+ %% true if we have received any publishes, false otherwise
+ %% note that this will also be true when the connection is
+ %% already blocked
+ should_block,
+ %% true if we have sent a connection.blocked,
+ %% false otherwise
+ connection_blocked_message_sent
+}).
+
+-define(STATISTICS_KEYS, [pid, recv_oct, recv_cnt, send_oct, send_cnt,
+ send_pend, state, channels, reductions,
+ garbage_collection]).
+
+-define(SIMPLE_METRICS, [pid, recv_oct, send_oct, reductions]).
+-define(OTHER_METRICS, [recv_cnt, send_cnt, send_pend, state, channels,
+ garbage_collection]).
+
+-define(CREATION_EVENT_KEYS,
+ [pid, name, port, peer_port, host,
+ peer_host, ssl, peer_cert_subject, peer_cert_issuer,
+ peer_cert_validity, auth_mechanism, ssl_protocol,
+ ssl_key_exchange, ssl_cipher, ssl_hash, protocol, user, vhost,
+ timeout, frame_max, channel_max, client_properties, connected_at,
+ node, user_who_performed_action]).
+
+-define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
+
+-define(AUTH_NOTIFICATION_INFO_KEYS,
+ [host, name, peer_host, peer_port, protocol, auth_mechanism,
+ ssl, ssl_protocol, ssl_cipher, peer_cert_issuer, peer_cert_subject,
+ peer_cert_validity]).
+
+-define(IS_RUNNING(State),
+ (State#v1.connection_state =:= running orelse
+ State#v1.connection_state =:= blocked)).
+
+-define(IS_STOPPING(State),
+ (State#v1.connection_state =:= closing orelse
+ State#v1.connection_state =:= closed)).
+
+%%--------------------------------------------------------------------------
+
+-type resource_alert() :: {WasAlarmSetForNode :: boolean(),
+ IsThereAnyAlarmsWithSameSourceInTheCluster :: boolean(),
+ NodeForWhichAlarmWasSetOrCleared :: node()}.
+
+%%--------------------------------------------------------------------------
+
+-spec start_link(pid(), any()) -> rabbit_types:ok(pid()).
+
+start_link(HelperSup, Ref) ->
+ Pid = proc_lib:spawn_link(?MODULE, init, [self(), HelperSup, Ref]),
+
+ {ok, Pid}.
+
+-spec shutdown(pid(), string()) -> 'ok'.
+
+shutdown(Pid, Explanation) ->
+ gen_server:call(Pid, {shutdown, Explanation}, infinity).
+
+-spec init(pid(), pid(), any()) -> no_return().
+
+init(Parent, HelperSup, Ref) ->
+ ?LG_PROCESS_TYPE(reader),
+ {ok, Sock} = rabbit_networking:handshake(Ref,
+ application:get_env(rabbit, proxy_protocol, false)),
+ Deb = sys:debug_options([]),
+ start_connection(Parent, HelperSup, Deb, Sock).
+
+-spec system_continue(_,_,{[binary()], non_neg_integer(), #v1{}}) -> any().
+
+system_continue(Parent, Deb, {Buf, BufLen, State}) ->
+ mainloop(Deb, Buf, BufLen, State#v1{parent = Parent}).
+
+-spec system_terminate(_,_,_,_) -> no_return().
+
+system_terminate(Reason, _Parent, _Deb, _State) ->
+ exit(Reason).
+
+-spec system_code_change(_,_,_,_) -> {'ok',_}.
+
+system_code_change(Misc, _Module, _OldVsn, _Extra) ->
+ {ok, Misc}.
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS.
+
+-spec info(pid()) -> rabbit_types:infos().
+
+info(Pid) ->
+ gen_server:call(Pid, info, infinity).
+
+-spec info(pid(), rabbit_types:info_keys()) -> rabbit_types:infos().
+
+info(Pid, Items) ->
+ case gen_server:call(Pid, {info, Items}, infinity) of
+ {ok, Res} -> Res;
+ {error, Error} -> throw(Error)
+ end.
+
+-spec force_event_refresh(pid(), reference()) -> 'ok'.
+
+% Note: https://www.pivotaltracker.com/story/show/166962656
+% This event is necessary for the stats timer to be initialized with
+% the correct values once the management agent has started
+force_event_refresh(Pid, Ref) ->
+ gen_server:cast(Pid, {force_event_refresh, Ref}).
+
+-spec conserve_resources(pid(), atom(), resource_alert()) -> 'ok'.
+
+conserve_resources(Pid, Source, {_, Conserve, _}) ->
+ Pid ! {conserve_resources, Source, Conserve},
+ ok.
+
+-spec server_properties(rabbit_types:protocol()) ->
+ rabbit_framing:amqp_table().
+
+server_properties(Protocol) ->
+ {ok, Product} = application:get_key(rabbit, description),
+ {ok, Version} = application:get_key(rabbit, vsn),
+
+ %% Get any configuration-specified server properties
+ {ok, RawConfigServerProps} = application:get_env(rabbit,
+ server_properties),
+
+ %% Normalize the simplified (2-tuple) and unsimplified (3-tuple) forms
+ %% from the config and merge them with the generated built-in properties
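+ %% e.g. (illustrative) a {some_key, "some value"} entry becomes
+ %% {<<"some_key">>, longstr, <<"some value">>}, while an already-typed
+ %% {<<"another_key">>, bool, true} entry is passed through unchanged.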
+ NormalizedConfigServerProps =
+ [{<<"capabilities">>, table, server_capabilities(Protocol)} |
+ [case X of
+ {KeyAtom, Value} -> {list_to_binary(atom_to_list(KeyAtom)),
+ longstr,
+ maybe_list_to_binary(Value)};
+ {BinKey, Type, Value} -> {BinKey, Type, Value}
+ end || X <- RawConfigServerProps ++
+ [{product, Product},
+ {version, Version},
+ {cluster_name, rabbit_nodes:cluster_name()},
+ {platform, rabbit_misc:platform_and_version()},
+ {copyright, ?COPYRIGHT_MESSAGE},
+ {information, ?INFORMATION_MESSAGE}]]],
+
+ %% Filter duplicated properties in favour of config file provided values
+ lists:usort(fun ({K1,_,_}, {K2,_,_}) -> K1 =< K2 end,
+ NormalizedConfigServerProps).
+
+maybe_list_to_binary(V) when is_binary(V) -> V;
+maybe_list_to_binary(V) when is_list(V) -> list_to_binary(V).
+
+server_capabilities(rabbit_framing_amqp_0_9_1) ->
+ [{<<"publisher_confirms">>, bool, true},
+ {<<"exchange_exchange_bindings">>, bool, true},
+ {<<"basic.nack">>, bool, true},
+ {<<"consumer_cancel_notify">>, bool, true},
+ {<<"connection.blocked">>, bool, true},
+ {<<"consumer_priorities">>, bool, true},
+ {<<"authentication_failure_close">>, bool, true},
+ {<<"per_consumer_qos">>, bool, true},
+ {<<"direct_reply_to">>, bool, true}];
+server_capabilities(_) ->
+ [].
+
+%%--------------------------------------------------------------------------
+
+socket_error(Reason) when is_atom(Reason) ->
+ rabbit_log_connection:error("Error on AMQP connection ~p: ~s~n",
+ [self(), rabbit_misc:format_inet_error(Reason)]);
+socket_error(Reason) ->
+ Fmt = "Error on AMQP connection ~p:~n~p~n",
+ Args = [self(), Reason],
+ case Reason of
+ %% The socket was closed while upgrading to SSL.
+ %% This is presumably a TCP healthcheck, so don't log
+ %% it unless specified otherwise.
+ {ssl_upgrade_error, closed} ->
+ %% Lager sinks (rabbit_log_connection)
+ %% are handled by the lager parse_transform.
+ %% Hence have to define the loglevel as a function call.
+ rabbit_log_connection:debug(Fmt, Args);
+ _ ->
+ rabbit_log_connection:error(Fmt, Args)
+ end.
+
+inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F).
+
+socket_op(Sock, Fun) ->
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ case Fun(Sock) of
+ {ok, Res} -> Res;
+ {error, Reason} -> socket_error(Reason),
+ rabbit_net:fast_close(RealSocket),
+ exit(normal)
+ end.
+
+-spec start_connection(pid(), pid(), any(), rabbit_net:socket()) ->
+ no_return().
+
+start_connection(Parent, HelperSup, Deb, Sock) ->
+ process_flag(trap_exit, true),
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ Name = case rabbit_net:connection_string(Sock, inbound) of
+ {ok, Str} -> list_to_binary(Str);
+ {error, enotconn} -> rabbit_net:fast_close(RealSocket),
+ exit(normal);
+ {error, Reason} -> socket_error(Reason),
+ rabbit_net:fast_close(RealSocket),
+ exit(normal)
+ end,
+ {ok, HandshakeTimeout} = application:get_env(rabbit, handshake_timeout),
+ InitialFrameMax = application:get_env(rabbit, initial_frame_max, ?FRAME_MIN_SIZE),
+ erlang:send_after(HandshakeTimeout, self(), handshake_timeout),
+ {PeerHost, PeerPort, Host, Port} =
+ socket_op(Sock, fun (S) -> rabbit_net:socket_ends(S, inbound) end),
+ ?store_proc_name(Name),
+ State = #v1{parent = Parent,
+ sock = RealSocket,
+ connection = #connection{
+ name = Name,
+ log_name = Name,
+ host = Host,
+ peer_host = PeerHost,
+ port = Port,
+ peer_port = PeerPort,
+ protocol = none,
+ user = none,
+ timeout_sec = (HandshakeTimeout / 1000),
+ frame_max = InitialFrameMax,
+ vhost = none,
+ client_properties = none,
+ capabilities = [],
+ auth_mechanism = none,
+ auth_state = none,
+ connected_at = os:system_time(
+ milli_seconds)},
+ callback = uninitialized_callback,
+ recv_len = 0,
+ pending_recv = false,
+ connection_state = pre_init,
+ queue_collector = undefined, %% started on tune-ok
+ helper_sup = HelperSup,
+ heartbeater = none,
+ channel_sup_sup_pid = none,
+ channel_count = 0,
+ throttle = #throttle{
+ last_blocked_at = never,
+ should_block = false,
+ blocked_by = sets:new(),
+ connection_blocked_message_sent = false
+ },
+ proxy_socket = rabbit_net:maybe_get_proxy_socket(Sock)},
+ try
+ case run({?MODULE, recvloop,
+ [Deb, [], 0, switch_callback(rabbit_event:init_stats_timer(
+ State, #v1.stats_timer),
+ handshake, 8)]}) of
+ %% connection was closed cleanly by the client
+ #v1{connection = #connection{user = #user{username = Username},
+ vhost = VHost}} ->
+ rabbit_log_connection:info("closing AMQP connection ~p (~s, vhost: '~s', user: '~s')~n",
+ [self(), dynamic_connection_name(Name), VHost, Username]);
+ %% just to be more defensive
+ _ ->
+ rabbit_log_connection:info("closing AMQP connection ~p (~s)~n",
+ [self(), dynamic_connection_name(Name)])
+ end
+ catch
+ Ex ->
+ log_connection_exception(dynamic_connection_name(Name), Ex)
+ after
+ %% We don't call gen_tcp:close/1 here since it waits for
+ %% pending output to be sent, which results in unnecessary
+ %% delays. We could just terminate - the reader is the
+ %% controlling process and hence its termination will close
+ %% the socket. However, to keep the file_handle_cache
+ %% accounting as accurate as possible we ought to close the
+ %% socket w/o delay before termination.
+ rabbit_net:fast_close(RealSocket),
+ rabbit_networking:unregister_connection(self()),
+ rabbit_core_metrics:connection_closed(self()),
+ ClientProperties = case get(client_properties) of
+ undefined ->
+ [];
+ Properties ->
+ Properties
+ end,
+ EventProperties = [{name, Name},
+ {pid, self()},
+ {node, node()},
+ {client_properties, ClientProperties}],
+ EventProperties1 = case get(connection_user_provided_name) of
+ undefined ->
+ EventProperties;
+ ConnectionUserProvidedName ->
+ [{user_provided_name, ConnectionUserProvidedName} | EventProperties]
+ end,
+ rabbit_event:notify(connection_closed, EventProperties1)
+ end,
+ done.
+
+log_connection_exception(Name, Ex) ->
+ Severity = case Ex of
+ connection_closed_with_no_data_received -> debug;
+ {connection_closed_abruptly, _} -> warning;
+ connection_closed_abruptly -> warning;
+ _ -> error
+ end,
+ log_connection_exception(Severity, Name, Ex).
+
+log_connection_exception(Severity, Name, {heartbeat_timeout, TimeoutSec}) ->
+ %% Long line to avoid extra spaces and line breaks in log
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s):~n"
+ "missed heartbeats from client, timeout: ~ps~n",
+ [self(), Name, TimeoutSec]);
+log_connection_exception(Severity, Name, {connection_closed_abruptly,
+ #v1{connection = #connection{user = #user{username = Username},
+ vhost = VHost}}}) ->
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s, vhost: '~s', user: '~s'):~nclient unexpectedly closed TCP connection~n",
+ [self(), Name, VHost, Username]);
+%% when a client abruptly closes the connection before connection.open/authentication/authorization
+%% succeeds, don't log username and vhost as 'none'
+log_connection_exception(Severity, Name, {connection_closed_abruptly, _}) ->
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s):~nclient unexpectedly closed TCP connection~n",
+ [self(), Name]);
+%% failed connection.tune negotiations
+log_connection_exception(Severity, Name, {handshake_error, tuning, _Channel,
+ {exit, #amqp_error{explanation = Explanation},
+ _Method, _Stacktrace}}) ->
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s):~nfailed to negotiate connection parameters: ~s~n",
+ [self(), Name, Explanation]);
+%% old exception structure
+log_connection_exception(Severity, Name, connection_closed_abruptly) ->
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s):~n"
+ "client unexpectedly closed TCP connection~n",
+ [self(), Name]);
+log_connection_exception(Severity, Name, Ex) ->
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s):~n~p~n",
+ [self(), Name, Ex]).
+
+log_connection_exception_with_severity(Severity, Fmt, Args) ->
+ case Severity of
+ debug -> rabbit_log_connection:debug(Fmt, Args);
+ warning -> rabbit_log_connection:warning(Fmt, Args);
+ error -> rabbit_log_connection:error(Fmt, Args)
+ end.
+
+run({M, F, A}) ->
+ try apply(M, F, A)
+ catch {become, MFA} -> run(MFA)
+ end.
+
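+%% The reader alternates between recvloop/4 and mainloop/4: recvloop consumes
+%% buffered bytes until at least recv_len bytes are available for the current
+%% callback (e.g. 8 bytes for the initial protocol header), then hands them to
+%% handle_input/3; mainloop waits on the socket for more data.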
+recvloop(Deb, Buf, BufLen, State = #v1{pending_recv = true}) ->
+ mainloop(Deb, Buf, BufLen, State);
+recvloop(Deb, Buf, BufLen, State = #v1{connection_state = blocked}) ->
+ mainloop(Deb, Buf, BufLen, State);
+recvloop(Deb, Buf, BufLen, State = #v1{connection_state = {become, F}}) ->
+ throw({become, F(Deb, Buf, BufLen, State)});
+recvloop(Deb, Buf, BufLen, State = #v1{sock = Sock, recv_len = RecvLen})
+ when BufLen < RecvLen ->
+ case rabbit_net:setopts(Sock, [{active, once}]) of
+ ok -> mainloop(Deb, Buf, BufLen,
+ State#v1{pending_recv = true});
+ {error, Reason} -> stop(Reason, State)
+ end;
+recvloop(Deb, [B], _BufLen, State) ->
+ {Rest, State1} = handle_input(State#v1.callback, B, State),
+ recvloop(Deb, [Rest], size(Rest), State1);
+recvloop(Deb, Buf, BufLen, State = #v1{recv_len = RecvLen}) ->
+ {DataLRev, RestLRev} = binlist_split(BufLen - RecvLen, Buf, []),
+ Data = list_to_binary(lists:reverse(DataLRev)),
+ {<<>>, State1} = handle_input(State#v1.callback, Data, State),
+ recvloop(Deb, lists:reverse(RestLRev), BufLen - RecvLen, State1).
+
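+%% Helper for recvloop/4: walks the newest-first list of buffered binaries,
+%% peeling Len bytes off its head (splitting a binary at the boundary if
+%% needed) so that the remaining, older bytes can be concatenated and handed
+%% to handle_input/3.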
+binlist_split(0, L, Acc) ->
+ {L, Acc};
+binlist_split(Len, L, [Acc0|Acc]) when Len < 0 ->
+ {H, T} = split_binary(Acc0, -Len),
+ {[H|L], [T|Acc]};
+binlist_split(Len, [H|T], Acc) ->
+ binlist_split(Len - size(H), T, [H|Acc]).
+
+-spec mainloop(_,[binary()], non_neg_integer(), #v1{}) -> any().
+
+mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock,
+ connection_state = CS,
+ connection = #connection{
+ name = ConnName}}) ->
+ Recv = rabbit_net:recv(Sock),
+ case CS of
+ pre_init when Buf =:= [] ->
+ %% We only log incoming connections when either the
+ %% first byte was received or there was an error (eg. a
+ %% timeout).
+ %%
+ %% The goal is to not log TCP healthchecks (a connection
+ %% with no data received) unless specified otherwise.
+ Fmt = "accepting AMQP connection ~p (~s)~n",
+ Args = [self(), ConnName],
+ case Recv of
+ closed -> rabbit_log_connection:debug(Fmt, Args);
+ _ -> rabbit_log_connection:info(Fmt, Args)
+ end;
+ _ ->
+ ok
+ end,
+ case Recv of
+ {data, Data} ->
+ recvloop(Deb, [Data | Buf], BufLen + size(Data),
+ State#v1{pending_recv = false});
+ closed when State#v1.connection_state =:= closed ->
+ State;
+ closed when CS =:= pre_init andalso Buf =:= [] ->
+ stop(tcp_healthcheck, State);
+ closed ->
+ stop(closed, State);
+ {other, {heartbeat_send_error, Reason}} ->
+ %% The only portable way to detect disconnect on blocked
+ %% connection is to wait for heartbeat send failure.
+ stop(Reason, State);
+ {error, Reason} ->
+ stop(Reason, State);
+ {other, {system, From, Request}} ->
+ sys:handle_system_msg(Request, From, State#v1.parent,
+ ?MODULE, Deb, {Buf, BufLen, State});
+ {other, Other} ->
+ case handle_other(Other, State) of
+ stop -> State;
+ NewState -> recvloop(Deb, Buf, BufLen, NewState)
+ end
+ end.
+
+-spec stop(_, #v1{}) -> no_return().
+stop(tcp_healthcheck, State) ->
+ %% The connection was closed before any packet was received. It's
+ %% probably a load-balancer healthcheck: don't consider this a
+ %% failure.
+ maybe_emit_stats(State),
+ throw(connection_closed_with_no_data_received);
+stop(closed, State) ->
+ maybe_emit_stats(State),
+ throw({connection_closed_abruptly, State});
+stop(Reason, State) ->
+ maybe_emit_stats(State),
+ throw({inet_error, Reason}).
+
+handle_other({conserve_resources, Source, Conserve},
+ State = #v1{throttle = Throttle = #throttle{blocked_by = Blockers}}) ->
+ Resource = {resource, Source},
+ Blockers1 = case Conserve of
+ true -> sets:add_element(Resource, Blockers);
+ false -> sets:del_element(Resource, Blockers)
+ end,
+ control_throttle(State#v1{throttle = Throttle#throttle{blocked_by = Blockers1}});
+handle_other({channel_closing, ChPid}, State) ->
+ ok = rabbit_channel:ready_for_close(ChPid),
+ {_, State1} = channel_cleanup(ChPid, State),
+ maybe_close(control_throttle(State1));
+handle_other({'EXIT', Parent, normal}, State = #v1{parent = Parent}) ->
+ %% rabbitmq/rabbitmq-server#544
+ %% The connection port process has exited due to the TCP socket being closed.
+ %% Handle this case in the same manner as receiving {error, closed}
+ stop(closed, State);
+handle_other({'EXIT', Parent, Reason}, State = #v1{parent = Parent}) ->
+ Msg = io_lib:format("broker forced connection closure with reason '~w'", [Reason]),
+ terminate(Msg, State),
+ %% this is what we are expected to do according to
+ %% https://www.erlang.org/doc/man/sys.html
+ %%
+ %% If we wanted to be *really* nice we should wait for a while for
+ %% clients to close the socket at their end, just as we do in the
+ %% ordinary error case. However, since this termination is
+ %% initiated by our parent it is probably more important to exit
+ %% quickly.
+ maybe_emit_stats(State),
+ exit(Reason);
+handle_other({channel_exit, _Channel, E = {writer, send_failed, _E}}, State) ->
+ maybe_emit_stats(State),
+ throw(E);
+handle_other({channel_exit, Channel, Reason}, State) ->
+ handle_exception(State, Channel, Reason);
+handle_other({'DOWN', _MRef, process, ChPid, Reason}, State) ->
+ handle_dependent_exit(ChPid, Reason, State);
+handle_other(terminate_connection, State) ->
+ maybe_emit_stats(State),
+ stop;
+handle_other(handshake_timeout, State)
+ when ?IS_RUNNING(State) orelse ?IS_STOPPING(State) ->
+ State;
+handle_other(handshake_timeout, State) ->
+ maybe_emit_stats(State),
+ throw({handshake_timeout, State#v1.callback});
+handle_other(heartbeat_timeout, State = #v1{connection_state = closed}) ->
+ State;
+handle_other(heartbeat_timeout,
+ State = #v1{connection = #connection{timeout_sec = T}}) ->
+ maybe_emit_stats(State),
+ throw({heartbeat_timeout, T});
+handle_other({'$gen_call', From, {shutdown, Explanation}}, State) ->
+ {ForceTermination, NewState} = terminate(Explanation, State),
+ gen_server:reply(From, ok),
+ case ForceTermination of
+ force -> stop;
+ normal -> NewState
+ end;
+handle_other({'$gen_call', From, info}, State) ->
+ gen_server:reply(From, infos(?INFO_KEYS, State)),
+ State;
+handle_other({'$gen_call', From, {info, Items}}, State) ->
+ gen_server:reply(From, try {ok, infos(Items, State)}
+ catch Error -> {error, Error}
+ end),
+ State;
+handle_other({'$gen_cast', {force_event_refresh, Ref}}, State)
+ when ?IS_RUNNING(State) ->
+ rabbit_event:notify(
+ connection_created,
+ augment_infos_with_user_provided_connection_name(
+ [{type, network} | infos(?CREATION_EVENT_KEYS, State)], State),
+ Ref),
+ rabbit_event:init_stats_timer(State, #v1.stats_timer);
+handle_other({'$gen_cast', {force_event_refresh, _Ref}}, State) ->
+ %% Ignore, we will emit a created event once we start running.
+ State;
+handle_other(ensure_stats, State) ->
+ ensure_stats_timer(State);
+handle_other(emit_stats, State) ->
+ emit_stats(State);
+handle_other({bump_credit, Msg}, State) ->
+ %% Here we are receiving credit by some channel process.
+ credit_flow:handle_bump_msg(Msg),
+ control_throttle(State);
+handle_other(Other, State) ->
+ %% internal error -> something worth dying for
+ maybe_emit_stats(State),
+ exit({unexpected_message, Other}).
+
+switch_callback(State, Callback, Length) ->
+ State#v1{callback = Callback, recv_len = Length}.
+
+terminate(Explanation, State) when ?IS_RUNNING(State) ->
+ {normal, handle_exception(State, 0,
+ rabbit_misc:amqp_error(
+ connection_forced, "~s", [Explanation], none))};
+terminate(_Explanation, State) ->
+ {force, State}.
+
+send_blocked(#v1{connection = #connection{protocol = Protocol,
+ capabilities = Capabilities},
+ sock = Sock}, Reason) ->
+ case rabbit_misc:table_lookup(Capabilities, <<"connection.blocked">>) of
+ {bool, true} ->
+ ok = send_on_channel0(Sock, #'connection.blocked'{reason = Reason},
+ Protocol);
+ _ ->
+ ok
+ end.
+
+send_unblocked(#v1{connection = #connection{protocol = Protocol,
+ capabilities = Capabilities},
+ sock = Sock}) ->
+ case rabbit_misc:table_lookup(Capabilities, <<"connection.blocked">>) of
+ {bool, true} ->
+ ok = send_on_channel0(Sock, #'connection.unblocked'{}, Protocol);
+ _ ->
+ ok
+ end.
+
+%%--------------------------------------------------------------------------
+%% error handling / termination
+
+close_connection(State = #v1{queue_collector = Collector,
+ connection = #connection{
+ timeout_sec = TimeoutSec}}) ->
+ %% The spec says "Exclusive queues may only be accessed by the
+ %% current connection, and are deleted when that connection
+ %% closes." This does not strictly imply synchrony, but in
+ %% practice it seems to be what people assume.
+ clean_up_exclusive_queues(Collector),
+ %% We terminate the connection after the specified interval, but
+ %% no later than ?CLOSING_TIMEOUT seconds.
+ erlang:send_after((if TimeoutSec > 0 andalso
+ TimeoutSec < ?CLOSING_TIMEOUT -> TimeoutSec;
+ true -> ?CLOSING_TIMEOUT
+ end) * 1000, self(), terminate_connection),
+ State#v1{connection_state = closed}.
+
+%% queue collector will be undefined when connection
+%% tuning was never performed or didn't finish. In such cases
+%% there's also nothing to clean up.
+clean_up_exclusive_queues(undefined) ->
+ ok;
+
+clean_up_exclusive_queues(Collector) ->
+ rabbit_queue_collector:delete_all(Collector).
+
+handle_dependent_exit(ChPid, Reason, State) ->
+ {Channel, State1} = channel_cleanup(ChPid, State),
+ case {Channel, termination_kind(Reason)} of
+ {undefined, controlled} -> State1;
+ {undefined, uncontrolled} -> handle_uncontrolled_channel_close(ChPid),
+ exit({abnormal_dependent_exit,
+ ChPid, Reason});
+ {_, controlled} -> maybe_close(control_throttle(State1));
+ {_, uncontrolled} -> handle_uncontrolled_channel_close(ChPid),
+ State2 = handle_exception(
+ State1, Channel, Reason),
+ maybe_close(control_throttle(State2))
+ end.
+
+terminate_channels(#v1{channel_count = 0} = State) ->
+ State;
+terminate_channels(#v1{channel_count = ChannelCount} = State) ->
+ lists:foreach(fun rabbit_channel:shutdown/1, all_channels()),
+ Timeout = 1000 * ?CHANNEL_TERMINATION_TIMEOUT * ChannelCount,
+ TimerRef = erlang:send_after(Timeout, self(), cancel_wait),
+ wait_for_channel_termination(ChannelCount, TimerRef, State).
+
+wait_for_channel_termination(0, TimerRef, State) ->
+ case erlang:cancel_timer(TimerRef) of
+ false -> receive
+ cancel_wait -> State
+ end;
+ _ -> State
+ end;
+wait_for_channel_termination(N, TimerRef,
+ State = #v1{connection_state = CS,
+ connection = #connection{
+ log_name = ConnName,
+ user = User,
+ vhost = VHost},
+ sock = Sock}) ->
+ receive
+ {'DOWN', _MRef, process, ChPid, Reason} ->
+ {Channel, State1} = channel_cleanup(ChPid, State),
+ case {Channel, termination_kind(Reason)} of
+ {undefined, _} ->
+ exit({abnormal_dependent_exit, ChPid, Reason});
+ {_, controlled} ->
+ wait_for_channel_termination(N-1, TimerRef, State1);
+ {_, uncontrolled} ->
+ rabbit_log_connection:error(
+ "Error on AMQP connection ~p (~s, vhost: '~s',"
+ " user: '~s', state: ~p), channel ~p:"
+ "error while terminating:~n~p~n",
+ [self(), ConnName, VHost, User#user.username,
+ CS, Channel, Reason]),
+ handle_uncontrolled_channel_close(ChPid),
+ wait_for_channel_termination(N-1, TimerRef, State1)
+ end;
+ {'EXIT', Sock, _Reason} ->
+ clean_up_all_channels(State),
+ exit(normal);
+ cancel_wait ->
+ exit(channel_termination_timeout)
+ end.
+
+maybe_close(State = #v1{connection_state = closing,
+ channel_count = 0,
+ connection = #connection{protocol = Protocol},
+ sock = Sock}) ->
+ NewState = close_connection(State),
+ ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol),
+ NewState;
+maybe_close(State) ->
+ State.
+
+termination_kind(normal) -> controlled;
+termination_kind(_) -> uncontrolled.
+
+format_hard_error(#amqp_error{name = N, explanation = E, method = M}) ->
+ io_lib:format("operation ~s caused a connection exception ~s: ~p", [M, N, E]);
+format_hard_error(Reason) ->
+ case io_lib:deep_char_list(Reason) of
+ true -> Reason;
+ false -> rabbit_misc:format("~p", [Reason])
+ end.
+
+log_hard_error(#v1{connection_state = CS,
+ connection = #connection{
+ log_name = ConnName,
+ user = User,
+ vhost = VHost}}, Channel, Reason) ->
+ rabbit_log_connection:error(
+ "Error on AMQP connection ~p (~s, vhost: '~s',"
+ " user: '~s', state: ~p), channel ~p:~n ~s~n",
+ [self(), ConnName, VHost, User#user.username, CS, Channel, format_hard_error(Reason)]).
+
+handle_exception(State = #v1{connection_state = closed}, Channel, Reason) ->
+ log_hard_error(State, Channel, Reason),
+ State;
+handle_exception(State = #v1{connection = #connection{protocol = Protocol},
+ connection_state = CS},
+ Channel, Reason)
+ when ?IS_RUNNING(State) orelse CS =:= closing ->
+ respond_and_close(State, Channel, Protocol, Reason, Reason);
+%% authentication failure
+handle_exception(State = #v1{connection = #connection{protocol = Protocol,
+ log_name = ConnName,
+ capabilities = Capabilities},
+ connection_state = starting},
+ Channel, Reason = #amqp_error{name = access_refused,
+ explanation = ErrMsg}) ->
+ rabbit_log_connection:error(
+ "Error on AMQP connection ~p (~s, state: ~p):~n~s~n",
+ [self(), ConnName, starting, ErrMsg]),
+ %% respect authentication failure notification capability
+ case rabbit_misc:table_lookup(Capabilities,
+ <<"authentication_failure_close">>) of
+ {bool, true} ->
+ send_error_on_channel0_and_close(Channel, Protocol, Reason, State);
+ _ ->
+ close_connection(terminate_channels(State))
+ end;
+%% when loopback-only user tries to connect from a non-local host
+%% when user tries to access a vhost it has no permissions for
+handle_exception(State = #v1{connection = #connection{protocol = Protocol,
+ log_name = ConnName,
+ user = User},
+ connection_state = opening},
+ Channel, Reason = #amqp_error{name = not_allowed,
+ explanation = ErrMsg}) ->
+ rabbit_log_connection:error(
+ "Error on AMQP connection ~p (~s, user: '~s', state: ~p):~n~s~n",
+ [self(), ConnName, User#user.username, opening, ErrMsg]),
+ send_error_on_channel0_and_close(Channel, Protocol, Reason, State);
+handle_exception(State = #v1{connection = #connection{protocol = Protocol},
+ connection_state = CS = opening},
+ Channel, Reason = #amqp_error{}) ->
+ respond_and_close(State, Channel, Protocol, Reason,
+ {handshake_error, CS, Reason});
+%% when negotiation fails, e.g. due to channel_max being higher than the
+%% maximum allowed limit
+handle_exception(State = #v1{connection = #connection{protocol = Protocol,
+ log_name = ConnName,
+ user = User},
+ connection_state = tuning},
+ Channel, Reason = #amqp_error{name = not_allowed,
+ explanation = ErrMsg}) ->
+ rabbit_log_connection:error(
+ "Error on AMQP connection ~p (~s,"
+ " user: '~s', state: ~p):~n~s~n",
+ [self(), ConnName, User#user.username, tuning, ErrMsg]),
+ send_error_on_channel0_and_close(Channel, Protocol, Reason, State);
+handle_exception(State, Channel, Reason) ->
+ %% We don't trust the client at this point - force them to wait
+ %% for a bit so they can't DOS us with repeated failed logins etc.
+ timer:sleep(?SILENT_CLOSE_DELAY * 1000),
+ throw({handshake_error, State#v1.connection_state, Channel, Reason}).
+
+%% we've "lost sync" with the client and hence must not accept any
+%% more input
+-spec fatal_frame_error(_, _, _, _, _) -> no_return().
+fatal_frame_error(Error, Type, Channel, Payload, State) ->
+ frame_error(Error, Type, Channel, Payload, State),
+ %% grace period to allow transmission of error
+ timer:sleep(?SILENT_CLOSE_DELAY * 1000),
+ throw(fatal_frame_error).
+
+frame_error(Error, Type, Channel, Payload, State) ->
+ {Str, Bin} = payload_snippet(Payload),
+ handle_exception(State, Channel,
+ rabbit_misc:amqp_error(frame_error,
+ "type ~p, ~s octets = ~p: ~p",
+ [Type, Str, Bin, Error], none)).
+
+unexpected_frame(Type, Channel, Payload, State) ->
+ {Str, Bin} = payload_snippet(Payload),
+ handle_exception(State, Channel,
+ rabbit_misc:amqp_error(unexpected_frame,
+ "type ~p, ~s octets = ~p",
+ [Type, Str, Bin], none)).
+
+payload_snippet(Payload) when size(Payload) =< 16 ->
+ {"all", Payload};
+payload_snippet(<<Snippet:16/binary, _/binary>>) ->
+ {"first 16", Snippet}.
+
+%%--------------------------------------------------------------------------
+
+create_channel(_Channel,
+ #v1{channel_count = ChannelCount,
+ connection = #connection{channel_max = ChannelMax}})
+ when ChannelMax /= 0 andalso ChannelCount >= ChannelMax ->
+ {error, rabbit_misc:amqp_error(
+ not_allowed, "number of channels opened (~w) has reached the "
+ "negotiated channel_max (~w)",
+ [ChannelCount, ChannelMax], 'none')};
+create_channel(Channel,
+ #v1{sock = Sock,
+ queue_collector = Collector,
+ channel_sup_sup_pid = ChanSupSup,
+ channel_count = ChannelCount,
+ connection =
+ #connection{name = Name,
+ protocol = Protocol,
+ frame_max = FrameMax,
+ vhost = VHost,
+ capabilities = Capabilities,
+ user = #user{username = Username} = User}
+ } = State) ->
+ case rabbit_auth_backend_internal:is_over_channel_limit(Username) of
+ false ->
+ {ok, _ChSupPid, {ChPid, AState}} =
+ rabbit_channel_sup_sup:start_channel(
+ ChanSupSup, {tcp, Sock, Channel, FrameMax, self(), Name,
+ Protocol, User, VHost, Capabilities,
+ Collector}),
+ MRef = erlang:monitor(process, ChPid),
+ put({ch_pid, ChPid}, {Channel, MRef}),
+ put({channel, Channel}, {ChPid, AState}),
+ {ok, {ChPid, AState}, State#v1{channel_count = ChannelCount + 1}};
+ {true, Limit} ->
+ {error, rabbit_misc:amqp_error(not_allowed,
+ "number of channels opened for user '~s' has reached "
+ "the maximum allowed user limit of (~w)",
+ [Username, Limit], 'none')}
+ end.
+
+channel_cleanup(ChPid, State = #v1{channel_count = ChannelCount}) ->
+ case get({ch_pid, ChPid}) of
+ undefined -> {undefined, State};
+ {Channel, MRef} -> credit_flow:peer_down(ChPid),
+ erase({channel, Channel}),
+ erase({ch_pid, ChPid}),
+ erlang:demonitor(MRef, [flush]),
+ {Channel, State#v1{channel_count = ChannelCount - 1}}
+ end.
+
+all_channels() -> [ChPid || {{ch_pid, ChPid}, _ChannelMRef} <- get()].
+
+clean_up_all_channels(State) ->
+ CleanupFun = fun(ChPid) ->
+ channel_cleanup(ChPid, State)
+ end,
+ lists:foreach(CleanupFun, all_channels()).
+
+%%--------------------------------------------------------------------------
+
+handle_frame(Type, 0, Payload,
+ State = #v1{connection = #connection{protocol = Protocol}})
+ when ?IS_STOPPING(State) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
+ {method, MethodName, FieldsBin} ->
+ handle_method0(MethodName, FieldsBin, State);
+ _Other -> State
+ end;
+handle_frame(Type, 0, Payload,
+ State = #v1{connection = #connection{protocol = Protocol}}) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
+ error -> frame_error(unknown_frame, Type, 0, Payload, State);
+ heartbeat -> State;
+ {method, MethodName, FieldsBin} ->
+ handle_method0(MethodName, FieldsBin, State);
+ _Other -> unexpected_frame(Type, 0, Payload, State)
+ end;
+handle_frame(Type, Channel, Payload,
+ State = #v1{connection = #connection{protocol = Protocol}})
+ when ?IS_RUNNING(State) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
+ error -> frame_error(unknown_frame, Type, Channel, Payload, State);
+ heartbeat -> unexpected_frame(Type, Channel, Payload, State);
+ Frame -> process_frame(Frame, Channel, State)
+ end;
+handle_frame(_Type, _Channel, _Payload, State) when ?IS_STOPPING(State) ->
+ State;
+handle_frame(Type, Channel, Payload, State) ->
+ unexpected_frame(Type, Channel, Payload, State).
+
+process_frame(Frame, Channel, State) ->
+ ChKey = {channel, Channel},
+ case (case get(ChKey) of
+ undefined -> create_channel(Channel, State);
+ Other -> {ok, Other, State}
+ end) of
+ {error, Error} ->
+ handle_exception(State, Channel, Error);
+ {ok, {ChPid, AState}, State1} ->
+ case rabbit_command_assembler:process(Frame, AState) of
+ {ok, NewAState} ->
+ put(ChKey, {ChPid, NewAState}),
+ post_process_frame(Frame, ChPid, State1);
+ {ok, Method, NewAState} ->
+ rabbit_channel:do(ChPid, Method),
+ put(ChKey, {ChPid, NewAState}),
+ post_process_frame(Frame, ChPid, State1);
+ {ok, Method, Content, NewAState} ->
+ rabbit_channel:do_flow(ChPid, Method, Content),
+ put(ChKey, {ChPid, NewAState}),
+ post_process_frame(Frame, ChPid, control_throttle(State1));
+ {error, Reason} ->
+ handle_exception(State1, Channel, Reason)
+ end
+ end.
+
+post_process_frame({method, 'channel.close_ok', _}, ChPid, State) ->
+ {_, State1} = channel_cleanup(ChPid, State),
+ %% This is not strictly necessary, but more obviously
+ %% correct. Also note that we do not need to call maybe_close/1
+ %% since we cannot possibly be in the 'closing' state.
+ control_throttle(State1);
+post_process_frame({content_header, _, _, _, _}, _ChPid, State) ->
+ publish_received(State);
+post_process_frame({content_body, _}, _ChPid, State) ->
+ publish_received(State);
+post_process_frame(_Frame, _ChPid, State) ->
+ State.
+
+%%--------------------------------------------------------------------------
+
+%% We allow clients to exceed the frame size a little bit since quite
+%% a few get it wrong - off-by-one or off-by-eight (the empty frame size) errors are typical.
+-define(FRAME_SIZE_FUDGE, ?EMPTY_FRAME_SIZE).
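+%%
+%% Worked example (illustrative, not normative): ?EMPTY_FRAME_SIZE is the 8
+%% octets of frame header plus end marker, so with a negotiated frame_max of
+%% 4096 the nominal payload limit is 4096 - 8 = 4088 octets; the fudge below
+%% adds those 8 octets back, so only payloads larger than 4096 octets are
+%% treated as a fatal frame error.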
+
+handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32, _/binary>>,
+ State = #v1{connection = #connection{frame_max = FrameMax}})
+ when FrameMax /= 0 andalso
+ PayloadSize > FrameMax - ?EMPTY_FRAME_SIZE + ?FRAME_SIZE_FUDGE ->
+ fatal_frame_error(
+ {frame_too_large, PayloadSize, FrameMax - ?EMPTY_FRAME_SIZE},
+ Type, Channel, <<>>, State);
+handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32,
+ Payload:PayloadSize/binary, ?FRAME_END,
+ Rest/binary>>,
+ State) ->
+ {Rest, ensure_stats_timer(handle_frame(Type, Channel, Payload, State))};
+handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32, Rest/binary>>,
+ State) ->
+ {Rest, ensure_stats_timer(
+ switch_callback(State,
+ {frame_payload, Type, Channel, PayloadSize},
+ PayloadSize + 1))};
+handle_input({frame_payload, Type, Channel, PayloadSize}, Data, State) ->
+ <<Payload:PayloadSize/binary, EndMarker, Rest/binary>> = Data,
+ case EndMarker of
+ ?FRAME_END -> State1 = handle_frame(Type, Channel, Payload, State),
+ {Rest, switch_callback(State1, frame_header, 7)};
+ _ -> fatal_frame_error({invalid_frame_end_marker, EndMarker},
+ Type, Channel, Payload, State)
+ end;
+handle_input(handshake, <<"AMQP", A, B, C, D, Rest/binary>>, State) ->
+ {Rest, handshake({A, B, C, D}, State)};
+handle_input(handshake, <<Other:8/binary, _/binary>>, #v1{sock = Sock}) ->
+ refuse_connection(Sock, {bad_header, Other});
+handle_input(Callback, Data, _State) ->
+ throw({bad_input, Callback, Data}).
+
+%% The two rules pertaining to version negotiation:
+%%
+%% * If the server cannot support the protocol specified in the
+%% protocol header, it MUST respond with a valid protocol header and
+%% then close the socket connection.
+%%
+%% * The server MUST provide a protocol version that is lower than or
+%% equal to that requested by the client in the protocol header.
+handshake({0, 0, 9, 1}, State) ->
+ start_connection({0, 9, 1}, rabbit_framing_amqp_0_9_1, State);
+
+%% This is the protocol header for 0-9, which we can safely treat as
+%% though it were 0-9-1.
+handshake({1, 1, 0, 9}, State) ->
+ start_connection({0, 9, 0}, rabbit_framing_amqp_0_9_1, State);
+
+%% This is what most clients send for 0-8. The 0-8 spec, confusingly,
+%% defines the version as 8-0.
+handshake({1, 1, 8, 0}, State) ->
+ start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
+
+%% The 0-8 spec as published on the AMQP web site actually has this as the
+%% protocol header; some libraries, e.g. py-amqplib, send it when they
+%% want 0-8.
+handshake({1, 1, 9, 1}, State) ->
+ start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
+
+%% ... and finally, the 1.0 spec is crystal clear!
+handshake({Id, 1, 0, 0}, State) ->
+ become_1_0(Id, State);
+
+handshake(Vsn, #v1{sock = Sock}) ->
+ refuse_connection(Sock, {bad_version, Vsn}).
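+
+%% A hedged illustration of the handshake above (the host, port and the bogus
+%% header bytes are placeholders): a client offering an unsupported protocol
+%% header is answered with the server's own 0-9-1 header and then
+%% disconnected, e.g. from an Erlang shell:
+%%
+%%   {ok, S} = gen_tcp:connect("localhost", 5672, [binary, {active, false}]),
+%%   ok = gen_tcp:send(S, <<"AMQP",9,9,9,9>>),
+%%   {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(S, 8),
+%%   ok = gen_tcp:close(S).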
+
+%% Offer a protocol version to the client. Connection.start only
+%% includes a major and minor version number; luckily, 0-9 and 0-9-1
+%% are similar enough that clients will be happy with either.
+start_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision},
+ Protocol,
+ State = #v1{sock = Sock, connection = Connection}) ->
+ rabbit_networking:register_connection(self()),
+ Start = #'connection.start'{
+ version_major = ProtocolMajor,
+ version_minor = ProtocolMinor,
+ server_properties = server_properties(Protocol),
+ mechanisms = auth_mechanisms_binary(Sock),
+ locales = <<"en_US">> },
+ ok = send_on_channel0(Sock, Start, Protocol),
+ switch_callback(State#v1{connection = Connection#connection{
+ timeout_sec = ?NORMAL_TIMEOUT,
+ protocol = Protocol},
+ connection_state = starting},
+ frame_header, 7).
+
+-spec refuse_connection(_, _, _) -> no_return().
+refuse_connection(Sock, Exception, {A, B, C, D}) ->
+ ok = inet_op(fun () -> rabbit_net:send(Sock, <<"AMQP",A,B,C,D>>) end),
+ throw(Exception).
+
+-spec refuse_connection(rabbit_net:socket(), any()) -> no_return().
+
+refuse_connection(Sock, Exception) ->
+ refuse_connection(Sock, Exception, {0, 0, 9, 1}).
+
+ensure_stats_timer(State = #v1{connection_state = running}) ->
+ rabbit_event:ensure_stats_timer(State, #v1.stats_timer, emit_stats);
+ensure_stats_timer(State) ->
+ State.
+
+%%--------------------------------------------------------------------------
+
+handle_method0(MethodName, FieldsBin,
+ State = #v1{connection = #connection{protocol = Protocol}}) ->
+ try
+ handle_method0(Protocol:decode_method_fields(MethodName, FieldsBin),
+ State)
+ catch throw:{inet_error, E} when E =:= closed; E =:= enotconn ->
+ maybe_emit_stats(State),
+ throw({connection_closed_abruptly, State});
+ exit:#amqp_error{method = none} = Reason ->
+ handle_exception(State, 0, Reason#amqp_error{method = MethodName});
+ Type:Reason:Stacktrace ->
+ handle_exception(State, 0, {Type, Reason, MethodName, Stacktrace})
+ end.
+
+handle_method0(#'connection.start_ok'{mechanism = Mechanism,
+ response = Response,
+ client_properties = ClientProperties},
+ State0 = #v1{connection_state = starting,
+ connection = Connection0,
+ sock = Sock}) ->
+ AuthMechanism = auth_mechanism_to_module(Mechanism, Sock),
+ Capabilities =
+ case rabbit_misc:table_lookup(ClientProperties, <<"capabilities">>) of
+ {table, Capabilities1} -> Capabilities1;
+ _ -> []
+ end,
+ Connection1 = Connection0#connection{
+ client_properties = ClientProperties,
+ capabilities = Capabilities,
+ auth_mechanism = {Mechanism, AuthMechanism},
+ auth_state = AuthMechanism:init(Sock)},
+ Connection2 = augment_connection_log_name(Connection1),
+ State = State0#v1{connection_state = securing,
+ connection = Connection2},
+ % Add the client properties to the process dictionary so they can be sent
+ % later in the connection_closed event
+ put(client_properties, ClientProperties),
+ case user_provided_connection_name(Connection2) of
+ undefined ->
+ undefined;
+ UserProvidedConnectionName ->
+ put(connection_user_provided_name, UserProvidedConnectionName)
+ end,
+ auth_phase(Response, State);
+
+handle_method0(#'connection.secure_ok'{response = Response},
+ State = #v1{connection_state = securing}) ->
+ auth_phase(Response, State);
+
+handle_method0(#'connection.tune_ok'{frame_max = FrameMax,
+ channel_max = ChannelMax,
+ heartbeat = ClientHeartbeat},
+ State = #v1{connection_state = tuning,
+ connection = Connection,
+ helper_sup = SupPid,
+ sock = Sock}) ->
+ ok = validate_negotiated_integer_value(
+ frame_max, ?FRAME_MIN_SIZE, FrameMax),
+ ok = validate_negotiated_integer_value(
+ channel_max, ?CHANNEL_MIN, ChannelMax),
+ {ok, Collector} = rabbit_connection_helper_sup:start_queue_collector(
+ SupPid, Connection#connection.name),
+ Frame = rabbit_binary_generator:build_heartbeat_frame(),
+ Parent = self(),
+ SendFun =
+ fun() ->
+ case catch rabbit_net:send(Sock, Frame) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ Parent ! {heartbeat_send_error, Reason};
+ Unexpected ->
+ Parent ! {heartbeat_send_error, Unexpected}
+ end,
+ ok
+ end,
+ ReceiveFun = fun() -> Parent ! heartbeat_timeout end,
+ Heartbeater = rabbit_heartbeat:start(
+ SupPid, Sock, Connection#connection.name,
+ ClientHeartbeat, SendFun, ClientHeartbeat, ReceiveFun),
+ State#v1{connection_state = opening,
+ connection = Connection#connection{
+ frame_max = FrameMax,
+ channel_max = ChannelMax,
+ timeout_sec = ClientHeartbeat},
+ queue_collector = Collector,
+ heartbeater = Heartbeater};
+
+handle_method0(#'connection.open'{virtual_host = VHost},
+ State = #v1{connection_state = opening,
+ connection = Connection = #connection{
+ log_name = ConnName,
+ user = User = #user{username = Username},
+ protocol = Protocol},
+ helper_sup = SupPid,
+ sock = Sock,
+ throttle = Throttle}) ->
+
+ ok = is_over_vhost_connection_limit(VHost, User),
+ ok = is_over_user_connection_limit(User),
+ ok = rabbit_access_control:check_vhost_access(User, VHost, {socket, Sock}, #{}),
+ ok = is_vhost_alive(VHost, User),
+ NewConnection = Connection#connection{vhost = VHost},
+ ok = send_on_channel0(Sock, #'connection.open_ok'{}, Protocol),
+
+ Alarms = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
+ BlockedBy = sets:from_list([{resource, Alarm} || Alarm <- Alarms]),
+ Throttle1 = Throttle#throttle{blocked_by = BlockedBy},
+
+ {ok, ChannelSupSupPid} =
+ rabbit_connection_helper_sup:start_channel_sup_sup(SupPid),
+ State1 = control_throttle(
+ State#v1{connection_state = running,
+ connection = NewConnection,
+ channel_sup_sup_pid = ChannelSupSupPid,
+ throttle = Throttle1}),
+ Infos = augment_infos_with_user_provided_connection_name(
+ [{type, network} | infos(?CREATION_EVENT_KEYS, State1)],
+ State1
+ ),
+ rabbit_core_metrics:connection_created(proplists:get_value(pid, Infos),
+ Infos),
+ rabbit_event:notify(connection_created, Infos),
+ maybe_emit_stats(State1),
+ rabbit_log_connection:info(
+ "connection ~p (~s): "
+ "user '~s' authenticated and granted access to vhost '~s'~n",
+ [self(), dynamic_connection_name(ConnName), Username, VHost]),
+ State1;
+handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) ->
+ lists:foreach(fun rabbit_channel:shutdown/1, all_channels()),
+ maybe_close(State#v1{connection_state = closing});
+handle_method0(#'connection.close'{},
+ State = #v1{connection = #connection{protocol = Protocol},
+ sock = Sock})
+ when ?IS_STOPPING(State) ->
+ %% We're already closed or closing, so we don't need to clean up
+ %% anything.
+ ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol),
+ State;
+handle_method0(#'connection.close_ok'{},
+ State = #v1{connection_state = closed}) ->
+ self() ! terminate_connection,
+ State;
+handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reason},
+ State = #v1{connection =
+ #connection{protocol = Protocol,
+ user = User = #user{username = Username},
+ log_name = ConnName} = Conn,
+ sock = Sock}) when ?IS_RUNNING(State) ->
+ rabbit_log_connection:debug(
+ "connection ~p (~s) of user '~s': "
+ "asked to update secret, reason: ~s~n",
+ [self(), dynamic_connection_name(ConnName), Username, Reason]),
+ case rabbit_access_control:update_state(User, NewSecret) of
+ {ok, User1} ->
+ %% User/auth backend state has been updated. Now we can propagate it to channels
+ %% asynchronously and return. All the channels have to do is to update their
+ %% own state.
+ %%
+ %% Any secret update errors coming from the authz backend will be handled in the other branch.
+ %% Therefore we optimistically do no error handling here. MK.
+ lists:foreach(fun(Ch) ->
+ rabbit_log:debug("Updating user/auth backend state for channel ~p", [Ch]),
+ _ = rabbit_channel:update_user_state(Ch, User1)
+ end, all_channels()),
+ ok = send_on_channel0(Sock, #'connection.update_secret_ok'{}, Protocol),
+ rabbit_log_connection:info(
+ "connection ~p (~s): "
+ "user '~s' updated secret, reason: ~s~n",
+ [self(), dynamic_connection_name(ConnName), Username, Reason]),
+ State#v1{connection = Conn#connection{user = User1}};
+ {refused, Message} ->
+ rabbit_log_connection:error("Secret update was refused for user '~p': ~p",
+ [Username, Message]),
+ rabbit_misc:protocol_error(not_allowed, "New secret was refused by one of the backends", []);
+ {error, Message} ->
+ rabbit_log_connection:error("Secret update for user '~p' failed: ~p",
+ [Username, Message]),
+ rabbit_misc:protocol_error(not_allowed,
+ "Secret update failed", [])
+ end;
+handle_method0(_Method, State) when ?IS_STOPPING(State) ->
+ State;
+handle_method0(_Method, #v1{connection_state = S}) ->
+ rabbit_misc:protocol_error(
+ channel_error, "unexpected method in connection state ~w", [S]).
+
+is_vhost_alive(VHostPath, User) ->
+ case rabbit_vhost_sup_sup:is_vhost_alive(VHostPath) of
+ true -> ok;
+ false ->
+ rabbit_misc:protocol_error(internal_error,
+ "access to vhost '~s' refused for user '~s': "
+ "vhost '~s' is down",
+ [VHostPath, User#user.username, VHostPath])
+ end.
+
+is_over_vhost_connection_limit(VHostPath, User) ->
+ try rabbit_vhost_limit:is_over_connection_limit(VHostPath) of
+ false -> ok;
+ {true, Limit} -> rabbit_misc:protocol_error(not_allowed,
+ "access to vhost '~s' refused for user '~s': "
+ "connection limit (~p) is reached",
+ [VHostPath, User#user.username, Limit])
+ catch
+ throw:{error, {no_such_vhost, VHostPath}} ->
+ rabbit_misc:protocol_error(not_allowed, "vhost ~s not found", [VHostPath])
+ end.
+
+is_over_user_connection_limit(#user{username = Username}) ->
+ case rabbit_auth_backend_internal:is_over_connection_limit(Username) of
+ false -> ok;
+ {true, Limit} -> rabbit_misc:protocol_error(not_allowed,
+ "Connection refused for user '~s': "
+ "user connection limit (~p) is reached",
+ [Username, Limit])
+ end.
+
+validate_negotiated_integer_value(Field, Min, ClientValue) ->
+ ServerValue = get_env(Field),
+ if ClientValue /= 0 andalso ClientValue < Min ->
+ fail_negotiation(Field, min, Min, ClientValue);
+ ServerValue /= 0 andalso (ClientValue =:= 0 orelse
+ ClientValue > ServerValue) ->
+ fail_negotiation(Field, max, ServerValue, ClientValue);
+ true ->
+ ok
+ end.
+
+%% keep dialyzer happy
+-spec fail_negotiation(atom(), 'min' | 'max', integer(), integer()) ->
+ no_return().
+fail_negotiation(Field, MinOrMax, ServerValue, ClientValue) ->
+ {S1, S2} = case MinOrMax of
+ min -> {lower, minimum};
+ max -> {higher, maximum}
+ end,
+ ClientValueDetail = get_client_value_detail(Field, ClientValue),
+ rabbit_misc:protocol_error(
+ not_allowed, "negotiated ~w = ~w~s is ~w than the ~w allowed value (~w)",
+ [Field, ClientValue, ClientValueDetail, S1, S2, ServerValue], 'connection.tune').
+
+get_env(Key) ->
+ {ok, Value} = application:get_env(rabbit, Key),
+ Value.
+
+send_on_channel0(Sock, Method, Protocol) ->
+ ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol).
+
+auth_mechanism_to_module(TypeBin, Sock) ->
+ case rabbit_registry:binary_to_type(TypeBin) of
+ {error, not_found} ->
+ rabbit_misc:protocol_error(
+ command_invalid, "unknown authentication mechanism '~s'",
+ [TypeBin]);
+ T ->
+ case {lists:member(T, auth_mechanisms(Sock)),
+ rabbit_registry:lookup_module(auth_mechanism, T)} of
+ {true, {ok, Module}} ->
+ Module;
+ _ ->
+ rabbit_misc:protocol_error(
+ command_invalid,
+ "invalid authentication mechanism '~s'", [T])
+ end
+ end.
+
+auth_mechanisms(Sock) ->
+ {ok, Configured} = application:get_env(auth_mechanisms),
+ [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism),
+ Module:should_offer(Sock), lists:member(Name, Configured)].
+
+auth_mechanisms_binary(Sock) ->
+ list_to_binary(
+ string:join([atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")).
+
+auth_phase(Response,
+ State = #v1{connection = Connection =
+ #connection{protocol = Protocol,
+ auth_mechanism = {Name, AuthMechanism},
+ auth_state = AuthState},
+ sock = Sock}) ->
+ RemoteAddress = list_to_binary(inet:ntoa(Connection#connection.host)),
+ case AuthMechanism:handle_response(Response, AuthState) of
+ {refused, Username, Msg, Args} ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, amqp091),
+ auth_fail(Username, Msg, Args, Name, State);
+ {protocol_error, Msg, Args} ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, <<>>, amqp091),
+ notify_auth_result(none, user_authentication_failure,
+ [{error, rabbit_misc:format(Msg, Args)}],
+ State),
+ rabbit_misc:protocol_error(syntax_error, Msg, Args);
+ {challenge, Challenge, AuthState1} ->
+ rabbit_core_metrics:auth_attempt_succeeded(RemoteAddress, <<>>, amqp091),
+ Secure = #'connection.secure'{challenge = Challenge},
+ ok = send_on_channel0(Sock, Secure, Protocol),
+ State#v1{connection = Connection#connection{
+ auth_state = AuthState1}};
+ {ok, User = #user{username = Username}} ->
+ case rabbit_access_control:check_user_loopback(Username, Sock) of
+ ok ->
+ rabbit_core_metrics:auth_attempt_succeeded(RemoteAddress, Username, amqp091),
+ notify_auth_result(Username, user_authentication_success,
+ [], State);
+ not_allowed ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, amqp091),
+ auth_fail(Username, "user '~s' can only connect via "
+ "localhost", [Username], Name, State)
+ end,
+ Tune = #'connection.tune'{frame_max = get_env(frame_max),
+ channel_max = get_env(channel_max),
+ heartbeat = get_env(heartbeat)},
+ ok = send_on_channel0(Sock, Tune, Protocol),
+ State#v1{connection_state = tuning,
+ connection = Connection#connection{user = User,
+ auth_state = none}}
+ end.
+
+-spec auth_fail
+ (rabbit_types:username() | none, string(), [any()], binary(), #v1{}) ->
+ no_return().
+
+auth_fail(Username, Msg, Args, AuthName,
+ State = #v1{connection = #connection{protocol = Protocol,
+ capabilities = Capabilities}}) ->
+ notify_auth_result(Username, user_authentication_failure,
+ [{error, rabbit_misc:format(Msg, Args)}], State),
+ AmqpError = rabbit_misc:amqp_error(
+ access_refused, "~s login refused: ~s",
+ [AuthName, io_lib:format(Msg, Args)], none),
+ case rabbit_misc:table_lookup(Capabilities,
+ <<"authentication_failure_close">>) of
+ {bool, true} ->
+ SafeMsg = io_lib:format(
+ "Login was refused using authentication "
+ "mechanism ~s. For details see the broker "
+ "logfile.", [AuthName]),
+ AmqpError1 = AmqpError#amqp_error{explanation = SafeMsg},
+ {0, CloseMethod} = rabbit_binary_generator:map_exception(
+ 0, AmqpError1, Protocol),
+ ok = send_on_channel0(State#v1.sock, CloseMethod, Protocol);
+ _ -> ok
+ end,
+ rabbit_misc:protocol_error(AmqpError).
+
+notify_auth_result(Username, AuthResult, ExtraProps, State) ->
+ EventProps = [{connection_type, network},
+ {name, case Username of none -> ''; _ -> Username end}] ++
+ [case Item of
+ name -> {connection_name, i(name, State)};
+ _ -> {Item, i(Item, State)}
+ end || Item <- ?AUTH_NOTIFICATION_INFO_KEYS] ++
+ ExtraProps,
+ rabbit_event:notify(AuthResult, [P || {_, V} = P <- EventProps, V =/= '']).
+
+%%--------------------------------------------------------------------------
+
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+i(pid, #v1{}) -> self();
+i(node, #v1{}) -> node();
+i(SockStat, S) when SockStat =:= recv_oct;
+ SockStat =:= recv_cnt;
+ SockStat =:= send_oct;
+ SockStat =:= send_cnt;
+ SockStat =:= send_pend ->
+ socket_info(fun (Sock) -> rabbit_net:getstat(Sock, [SockStat]) end,
+ fun ([{_, I}]) -> I end, S);
+i(ssl, #v1{sock = Sock}) -> rabbit_net:is_ssl(Sock);
+i(ssl_protocol, S) -> ssl_info(fun ({P, _}) -> P end, S);
+i(ssl_key_exchange, S) -> ssl_info(fun ({_, {K, _, _}}) -> K end, S);
+i(ssl_cipher, S) -> ssl_info(fun ({_, {_, C, _}}) -> C end, S);
+i(ssl_hash, S) -> ssl_info(fun ({_, {_, _, H}}) -> H end, S);
+i(peer_cert_issuer, S) -> cert_info(fun rabbit_ssl:peer_cert_issuer/1, S);
+i(peer_cert_subject, S) -> cert_info(fun rabbit_ssl:peer_cert_subject/1, S);
+i(peer_cert_validity, S) -> cert_info(fun rabbit_ssl:peer_cert_validity/1, S);
+i(channels, #v1{channel_count = ChannelCount}) -> ChannelCount;
+i(state, #v1{connection_state = ConnectionState,
+ throttle = #throttle{blocked_by = Reasons,
+ last_blocked_at = T} = Throttle}) ->
+ %% not throttled by resource or other longer-term reasons
+ %% TODO: come up with a sensible function name
+ case sets:size(sets:del_element(flow, Reasons)) =:= 0 andalso
+ (credit_flow:blocked() %% throttled by flow now
+ orelse %% throttled by flow recently
+ (is_blocked_by_flow(Throttle) andalso T =/= never andalso
+ erlang:convert_time_unit(erlang:monotonic_time() - T,
+ native,
+ micro_seconds) < 5000000)) of
+ true -> flow;
+ false ->
+ case {has_reasons_to_block(Throttle), ConnectionState} of
+ %% blocked
+ {_, blocked} -> blocked;
+ %% not yet blocked (there were no publishes)
+ {true, running} -> blocking;
+ %% not blocked
+ {false, _} -> ConnectionState;
+ %% catch all to be defensive
+ _ -> ConnectionState
+ end
+ end;
+i(garbage_collection, _State) ->
+ rabbit_misc:get_gc_info(self());
+i(reductions, _State) ->
+ {reductions, Reductions} = erlang:process_info(self(), reductions),
+ Reductions;
+i(Item, #v1{connection = Conn}) -> ic(Item, Conn).
+
+ic(name, #connection{name = Name}) -> Name;
+ic(host, #connection{host = Host}) -> Host;
+ic(peer_host, #connection{peer_host = PeerHost}) -> PeerHost;
+ic(port, #connection{port = Port}) -> Port;
+ic(peer_port, #connection{peer_port = PeerPort}) -> PeerPort;
+ic(protocol, #connection{protocol = none}) -> none;
+ic(protocol, #connection{protocol = P}) -> P:version();
+ic(user, #connection{user = none}) -> '';
+ic(user, #connection{user = U}) -> U#user.username;
+ic(user_who_performed_action, C) -> ic(user, C);
+ic(vhost, #connection{vhost = VHost}) -> VHost;
+ic(timeout, #connection{timeout_sec = Timeout}) -> Timeout;
+ic(frame_max, #connection{frame_max = FrameMax}) -> FrameMax;
+ic(channel_max, #connection{channel_max = ChMax}) -> ChMax;
+ic(client_properties, #connection{client_properties = CP}) -> CP;
+ic(auth_mechanism, #connection{auth_mechanism = none}) -> none;
+ic(auth_mechanism, #connection{auth_mechanism = {Name, _Mod}}) -> Name;
+ic(connected_at, #connection{connected_at = T}) -> T;
+ic(Item, #connection{}) -> throw({bad_argument, Item}).
+
+socket_info(Get, Select, #v1{sock = Sock}) ->
+ case Get(Sock) of
+ {ok, T} -> case Select(T) of
+ N when is_number(N) -> N;
+ _ -> 0
+ end;
+ {error, _} -> 0
+ end.
+
+ssl_info(F, #v1{sock = Sock}) ->
+ case rabbit_net:ssl_info(Sock) of
+ nossl -> '';
+ {error, _} -> '';
+ {ok, Items} ->
+ P = proplists:get_value(protocol, Items),
+ #{cipher := C,
+ key_exchange := K,
+ mac := H} = proplists:get_value(selected_cipher_suite, Items),
+ F({P, {K, C, H}})
+ end.
+
+cert_info(F, #v1{sock = Sock}) ->
+ case rabbit_net:peercert(Sock) of
+ nossl -> '';
+ {error, _} -> '';
+ {ok, Cert} -> list_to_binary(F(Cert))
+ end.
+
+maybe_emit_stats(State) ->
+ rabbit_event:if_enabled(State, #v1.stats_timer,
+ fun() -> emit_stats(State) end).
+
+emit_stats(State) ->
+ [{_, Pid}, {_, Recv_oct}, {_, Send_oct}, {_, Reductions}] = I
+ = infos(?SIMPLE_METRICS, State),
+ Infos = infos(?OTHER_METRICS, State),
+ rabbit_core_metrics:connection_stats(Pid, Infos),
+ rabbit_core_metrics:connection_stats(Pid, Recv_oct, Send_oct, Reductions),
+ rabbit_event:notify(connection_stats, Infos ++ I),
+ State1 = rabbit_event:reset_stats_timer(State, #v1.stats_timer),
+ ensure_stats_timer(State1).
+
+%% 1.0 stub
+-spec become_1_0(non_neg_integer(), #v1{}) -> no_return().
+
+become_1_0(Id, State = #v1{sock = Sock}) ->
+ case code:is_loaded(rabbit_amqp1_0_reader) of
+ false -> refuse_connection(Sock, amqp1_0_plugin_not_enabled);
+ _ -> Mode = case Id of
+ 0 -> amqp;
+ 3 -> sasl;
+ _ -> refuse_connection(
+ Sock, {unsupported_amqp1_0_protocol_id, Id},
+ {3, 1, 0, 0})
+ end,
+ F = fun (_Deb, Buf, BufLen, S) ->
+ {rabbit_amqp1_0_reader, init,
+ [Mode, pack_for_1_0(Buf, BufLen, S)]}
+ end,
+ State#v1{connection_state = {become, F}}
+ end.
+
+pack_for_1_0(Buf, BufLen, #v1{parent = Parent,
+ sock = Sock,
+ recv_len = RecvLen,
+ pending_recv = PendingRecv,
+ helper_sup = SupPid,
+ proxy_socket = ProxySocket}) ->
+ {Parent, Sock, RecvLen, PendingRecv, SupPid, Buf, BufLen, ProxySocket}.
+
+respond_and_close(State, Channel, Protocol, Reason, LogErr) ->
+ log_hard_error(State, Channel, LogErr),
+ send_error_on_channel0_and_close(Channel, Protocol, Reason, State).
+
+send_error_on_channel0_and_close(Channel, Protocol, Reason, State) ->
+ {0, CloseMethod} =
+ rabbit_binary_generator:map_exception(Channel, Reason, Protocol),
+ State1 = close_connection(terminate_channels(State)),
+ ok = send_on_channel0(State#v1.sock, CloseMethod, Protocol),
+ State1.
+
+%%
+%% Publisher throttling
+%%
+
+blocked_by_message(#throttle{blocked_by = Reasons}) ->
+ %% we don't want to report internal flow as a reason here since
+ %% it is entirely transient
+ Reasons1 = sets:del_element(flow, Reasons),
+ RStr = string:join([format_blocked_by(R) || R <- sets:to_list(Reasons1)], " & "),
+ list_to_binary(rabbit_misc:format("low on ~s", [RStr])).
+
+format_blocked_by({resource, memory}) -> "memory";
+format_blocked_by({resource, disk}) -> "disk";
+format_blocked_by({resource, disc}) -> "disk".
+
+update_last_blocked_at(Throttle) ->
+ Throttle#throttle{last_blocked_at = erlang:monotonic_time()}.
+
+connection_blocked_message_sent(
+ #throttle{connection_blocked_message_sent = BS}) -> BS.
+
+should_send_blocked(Throttle = #throttle{blocked_by = Reasons}) ->
+ should_block(Throttle)
+ andalso
+ sets:size(sets:del_element(flow, Reasons)) =/= 0
+ andalso
+ not connection_blocked_message_sent(Throttle).
+
+should_send_unblocked(Throttle = #throttle{blocked_by = Reasons}) ->
+ connection_blocked_message_sent(Throttle)
+ andalso
+ sets:size(sets:del_element(flow, Reasons)) == 0.
+
+%% Returns true if we have a reason to block
+%% this connection.
+has_reasons_to_block(#throttle{blocked_by = Reasons}) ->
+ sets:size(Reasons) > 0.
+
+is_blocked_by_flow(#throttle{blocked_by = Reasons}) ->
+ sets:is_element(flow, Reasons).
+
+should_block(#throttle{should_block = Val}) -> Val.
+
+should_block_connection(Throttle) ->
+ should_block(Throttle) andalso has_reasons_to_block(Throttle).
+
+should_unblock_connection(Throttle) ->
+ not should_block_connection(Throttle).
+
+maybe_block(State = #v1{connection_state = CS, throttle = Throttle}) ->
+ case should_block_connection(Throttle) of
+ true ->
+ State1 = State#v1{connection_state = blocked,
+ throttle = update_last_blocked_at(Throttle)},
+ case CS of
+ running ->
+ ok = rabbit_heartbeat:pause_monitor(State#v1.heartbeater);
+ _ -> ok
+ end,
+ maybe_send_blocked_or_unblocked(State1);
+ false -> State
+ end.
+
+maybe_unblock(State = #v1{throttle = Throttle}) ->
+ case should_unblock_connection(Throttle) of
+ true ->
+ ok = rabbit_heartbeat:resume_monitor(State#v1.heartbeater),
+ State1 = State#v1{connection_state = running,
+ throttle = Throttle#throttle{should_block = false}},
+ maybe_send_unblocked(State1);
+ false -> State
+ end.
+
+maybe_send_unblocked(State = #v1{throttle = Throttle}) ->
+ case should_send_unblocked(Throttle) of
+ true ->
+ ok = send_unblocked(State),
+ State#v1{throttle =
+ Throttle#throttle{connection_blocked_message_sent = false}};
+ false -> State
+ end.
+
+maybe_send_blocked_or_unblocked(State = #v1{throttle = Throttle}) ->
+ case should_send_blocked(Throttle) of
+ true ->
+ ok = send_blocked(State, blocked_by_message(Throttle)),
+ State#v1{throttle =
+ Throttle#throttle{connection_blocked_message_sent = true}};
+ false -> maybe_send_unblocked(State)
+ end.
+
+publish_received(State = #v1{throttle = Throttle}) ->
+ case has_reasons_to_block(Throttle) of
+ false -> State;
+ true ->
+ Throttle1 = Throttle#throttle{should_block = true},
+ maybe_block(State#v1{throttle = Throttle1})
+ end.
+
+control_throttle(State = #v1{connection_state = CS,
+ throttle = #throttle{blocked_by = Reasons} = Throttle}) ->
+ Throttle1 = case credit_flow:blocked() of
+ true ->
+ Throttle#throttle{blocked_by = sets:add_element(flow, Reasons)};
+ false ->
+ Throttle#throttle{blocked_by = sets:del_element(flow, Reasons)}
+ end,
+ State1 = State#v1{throttle = Throttle1},
+ case CS of
+ running -> maybe_block(State1);
+ %% unblock or re-enable blocking
+ blocked -> maybe_block(maybe_unblock(State1));
+ _ -> State1
+ end.
+
+augment_connection_log_name(#connection{name = Name} = Connection) ->
+ case user_provided_connection_name(Connection) of
+ undefined ->
+ Connection;
+ UserSpecifiedName ->
+ LogName = <<Name/binary, " - ", UserSpecifiedName/binary>>,
+ rabbit_log_connection:info("Connection ~p (~s) has a client-provided name: ~s~n", [self(), Name, UserSpecifiedName]),
+ ?store_proc_name(LogName),
+ Connection#connection{log_name = LogName}
+ end.
+
+augment_infos_with_user_provided_connection_name(Infos, #v1{connection = Connection}) ->
+ case user_provided_connection_name(Connection) of
+ undefined ->
+ Infos;
+ UserProvidedConnectionName ->
+ [{user_provided_name, UserProvidedConnectionName} | Infos]
+ end.
+
+user_provided_connection_name(#connection{client_properties = ClientProperties}) ->
+ case rabbit_misc:table_lookup(ClientProperties, <<"connection_name">>) of
+ {longstr, UserSpecifiedName} ->
+ UserSpecifiedName;
+ _ ->
+ undefined
+ end.
+
+dynamic_connection_name(Default) ->
+ case rabbit_misc:get_proc_name() of
+ {ok, Name} ->
+ Name;
+ _ ->
+ Default
+ end.
+
+handle_uncontrolled_channel_close(ChPid) ->
+ rabbit_core_metrics:channel_closed(ChPid),
+ rabbit_event:notify(channel_closed, [{pid, ChPid}]).
+
+-spec get_client_value_detail(atom(), integer()) -> string().
+get_client_value_detail(channel_max, 0) ->
+ " (no limit)";
+get_client_value_detail(_Field, _ClientValue) ->
+ "".
diff --git a/deps/rabbit/src/rabbit_recovery_terms.erl b/deps/rabbit/src/rabbit_recovery_terms.erl
new file mode 100644
index 0000000000..d89de9ece3
--- /dev/null
+++ b/deps/rabbit/src/rabbit_recovery_terms.erl
@@ -0,0 +1,240 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% We use a gen_server simply so that during the terminate/2 call
+%% (i.e., during shutdown), we can sync/flush the dets table to disk.
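+%%
+%% A minimal usage sketch of the public API below (the vhost, directory name
+%% and terms are illustrative placeholders, assuming a node whose vhost
+%% supervision tree is running):
+%%
+%%   ok = rabbit_recovery_terms:start(<<"/">>),
+%%   ok = rabbit_recovery_terms:store(<<"/">>, "queue-dir-1", [{persistent_bytes, 0}]),
+%%   {ok, [{persistent_bytes, 0}]} = rabbit_recovery_terms:read(<<"/">>, "queue-dir-1"),
+%%   ok = rabbit_recovery_terms:clear(<<"/">>).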
+
+-module(rabbit_recovery_terms).
+
+-behaviour(gen_server).
+
+-export([start/1, stop/1, store/3, read/2, clear/1]).
+
+-export([start_link/1]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([upgrade_recovery_terms/0, persistent_bytes/0]).
+-export([open_global_table/0, close_global_table/0,
+ read_global/1, delete_global_table/0]).
+-export([open_table/1, close_table/1]).
+
+-rabbit_upgrade({upgrade_recovery_terms, local, []}).
+-rabbit_upgrade({persistent_bytes, local, [upgrade_recovery_terms]}).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start(rabbit_types:vhost()) -> rabbit_types:ok_or_error(term()).
+
+start(VHost) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ {ok, _} = supervisor2:start_child(
+ VHostSup,
+ {?MODULE,
+ {?MODULE, start_link, [VHost]},
+ transient, ?WORKER_WAIT, worker,
+ [?MODULE]});
+ %% we can get here if a vhost is added and removed concurrently
+ %% e.g. some integration tests do it
+ {error, {no_such_vhost, VHost}} ->
+ rabbit_log:error("Failed to start a recovery terms manager for vhost ~s: vhost no longer exists!",
+ [VHost])
+ end,
+ ok.
+
+-spec stop(rabbit_types:vhost()) -> rabbit_types:ok_or_error(term()).
+
+stop(VHost) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ case supervisor:terminate_child(VHostSup, ?MODULE) of
+ ok -> supervisor:delete_child(VHostSup, ?MODULE);
+ E -> E
+ end;
+ %% see start/1
+ {error, {no_such_vhost, VHost}} ->
+ rabbit_log:error("Failed to stop a recovery terms manager for vhost ~s: vhost no longer exists!",
+ [VHost]),
+
+ ok
+ end.
+
+-spec store(rabbit_types:vhost(), file:filename(), term()) -> rabbit_types:ok_or_error(term()).
+
+store(VHost, DirBaseName, Terms) ->
+ dets:insert(VHost, {DirBaseName, Terms}).
+
+-spec read(rabbit_types:vhost(), file:filename()) -> rabbit_types:ok_or_error2(term(), not_found).
+
+read(VHost, DirBaseName) ->
+ case dets:lookup(VHost, DirBaseName) of
+ [{_, Terms}] -> {ok, Terms};
+ _ -> {error, not_found}
+ end.
+
+-spec clear(rabbit_types:vhost()) -> 'ok'.
+
+clear(VHost) ->
+ try
+ dets:delete_all_objects(VHost)
+ %% see start/1
+ catch _:badarg ->
+ rabbit_log:error("Failed to clear recovery terms for vhost ~s: table no longer exists!",
+ [VHost]),
+ ok
+ end,
+ flush(VHost).
+
+start_link(VHost) ->
+ gen_server:start_link(?MODULE, [VHost], []).
+
+%%----------------------------------------------------------------------------
+
+upgrade_recovery_terms() ->
+ open_global_table(),
+ try
+ QueuesDir = filename:join(rabbit_mnesia:dir(), "queues"),
+ Dirs = case rabbit_file:list_dir(QueuesDir) of
+ {ok, Entries} -> Entries;
+ {error, _} -> []
+ end,
+ [begin
+ File = filename:join([QueuesDir, Dir, "clean.dot"]),
+ case rabbit_file:read_term_file(File) of
+ {ok, Terms} -> ok = store_global_table(Dir, Terms);
+ {error, _} -> ok
+ end,
+ file:delete(File)
+ end || Dir <- Dirs],
+ ok
+ after
+ close_global_table()
+ end.
+
+persistent_bytes() -> dets_upgrade(fun persistent_bytes/1).
+persistent_bytes(Props) -> Props ++ [{persistent_bytes, 0}].
+
+dets_upgrade(Fun)->
+ open_global_table(),
+ try
+ ok = dets:foldl(fun ({DirBaseName, Terms}, Acc) ->
+ store_global_table(DirBaseName, Fun(Terms)),
+ Acc
+ end, ok, ?MODULE),
+ ok
+ after
+ close_global_table()
+ end.
+
+open_global_table() ->
+ File = filename:join(rabbit_mnesia:dir(), "recovery.dets"),
+ {ok, _} = dets:open_file(?MODULE, [{file, File},
+ {ram_file, true},
+ {auto_save, infinity}]),
+ ok.
+
+close_global_table() ->
+ try
+ dets:sync(?MODULE),
+ dets:close(?MODULE)
+ %% see clear/1
+ catch _:badarg ->
+ rabbit_log:error("Failed to clear global recovery terms: table no longer exists!",
+ []),
+ ok
+ end.
+
+store_global_table(DirBaseName, Terms) ->
+ dets:insert(?MODULE, {DirBaseName, Terms}).
+
+read_global(DirBaseName) ->
+ case dets:lookup(?MODULE, DirBaseName) of
+ [{_, Terms}] -> {ok, Terms};
+ _ -> {error, not_found}
+ end.
+
+delete_global_table() ->
+ file:delete(filename:join(rabbit_mnesia:dir(), "recovery.dets")).
+
+%%----------------------------------------------------------------------------
+
+init([VHost]) ->
+ process_flag(trap_exit, true),
+ open_table(VHost),
+ {ok, VHost}.
+
+handle_call(Msg, _, State) -> {stop, {unexpected_call, Msg}, State}.
+
+handle_cast(Msg, State) -> {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(_Info, State) -> {noreply, State}.
+
+terminate(_Reason, VHost) ->
+ close_table(VHost).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+-spec open_table(vhost:name()) -> rabbit_types:ok_or_error(any()).
+
+open_table(VHost) ->
+ open_table(VHost, 10).
+
+-spec open_table(vhost:name(), non_neg_integer()) -> rabbit_types:ok_or_error(any()).
+
+open_table(VHost, RetriesLeft) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ File = filename:join(VHostDir, "recovery.dets"),
+ Opts = [{file, File},
+ {ram_file, true},
+ {auto_save, infinity}],
+ case dets:open_file(VHost, Opts) of
+ {ok, _} -> ok;
+ {error, Error} ->
+ case RetriesLeft of
+ 0 ->
+ {error, Error};
+ N when is_integer(N) ->
+ _ = file:delete(File),
+ %% Wait before retrying
+ DelayInMs = 1000,
+ rabbit_log:warning("Failed to open a recovery terms DETS file at ~p. Will delete it and retry in ~p ms (~p retries left)",
+ [File, DelayInMs, RetriesLeft]),
+ timer:sleep(DelayInMs),
+ open_table(VHost, RetriesLeft - 1)
+ end
+ end.
+
+-spec flush(vhost:name()) -> rabbit_types:ok_or_error(any()).
+
+flush(VHost) ->
+ try
+ dets:sync(VHost)
+ %% see clear/1
+ catch _:badarg ->
+ rabbit_log:error("Failed to sync recovery terms table for vhost ~s: the table no longer exists!",
+ [VHost]),
+ ok
+ end.
+
+-spec close_table(vhost:name()) -> rabbit_types:ok_or_error(any()).
+
+close_table(VHost) ->
+ try
+ ok = flush(VHost),
+ ok = dets:close(VHost)
+ %% see clear/1
+ catch _:badarg ->
+ rabbit_log:error("Failed to close recovery terms table for vhost ~s: the table no longer exists!",
+ [VHost]),
+ ok
+ end.
diff --git a/deps/rabbit/src/rabbit_restartable_sup.erl b/deps/rabbit/src/rabbit_restartable_sup.erl
new file mode 100644
index 0000000000..46fcace99f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_restartable_sup.erl
@@ -0,0 +1,33 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_restartable_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/3]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+-define(DELAY, 2).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(atom(), rabbit_types:mfargs(), boolean()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Name, {_M, _F, _A} = Fun, Delay) ->
+ supervisor2:start_link({local, Name}, ?MODULE, [Fun, Delay]).
+
+init([{Mod, _F, _A} = Fun, Delay]) ->
+ {ok, {{one_for_one, 10, 10},
+ [{Mod, Fun, case Delay of
+ true -> {transient, 1};
+ false -> transient
+ end, ?WORKER_WAIT, worker, [Mod]}]}}.
diff --git a/deps/rabbit/src/rabbit_router.erl b/deps/rabbit/src/rabbit_router.erl
new file mode 100644
index 0000000000..ed170bcd8e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_router.erl
@@ -0,0 +1,65 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_router).
+-include_lib("stdlib/include/qlc.hrl").
+-include("rabbit.hrl").
+
+-export([match_bindings/2, match_routing_key/2]).
+
+%%----------------------------------------------------------------------------
+
+-export_type([routing_key/0, match_result/0]).
+
+-type routing_key() :: binary().
+-type match_result() :: [rabbit_types:binding_destination()].
+
+-spec match_bindings(rabbit_types:binding_source(),
+ fun ((rabbit_types:binding()) -> boolean())) ->
+ match_result().
+-spec match_routing_key(rabbit_types:binding_source(),
+ [routing_key()] | ['_']) ->
+ match_result().
+
+%%----------------------------------------------------------------------------
+
+match_bindings(SrcName, Match) ->
+ MatchHead = #route{binding = #binding{source = SrcName,
+ _ = '_'}},
+ Routes = ets:select(rabbit_route, [{MatchHead, [], [['$_']]}]),
+ [Dest || [#route{binding = Binding = #binding{destination = Dest}}] <-
+ Routes, Match(Binding)].
+
+match_routing_key(SrcName, [RoutingKey]) ->
+ find_routes(#route{binding = #binding{source = SrcName,
+ destination = '$1',
+ key = RoutingKey,
+ _ = '_'}},
+ []);
+match_routing_key(SrcName, [_|_] = RoutingKeys) ->
+ find_routes(#route{binding = #binding{source = SrcName,
+ destination = '$1',
+ key = '$2',
+ _ = '_'}},
+ [list_to_tuple(['orelse' | [{'=:=', '$2', RKey} ||
+ RKey <- RoutingKeys]])]).
+
+%%--------------------------------------------------------------------
+
+%% Normally we'd call mnesia:dirty_select/2 here, but that is quite
+%% expensive for the same reasons as above, and, additionally, due to
+%% mnesia 'fixing' the table with ets:safe_fixtable/2, which is wholly
+%% unnecessary. According to the ets docs (and the code in erl_db.c),
+%% 'select' is safe anyway ("Functions that internally traverse over a
+%% table, like select and match, will give the same guarantee as
+%% safe_fixtable.") and, furthermore, even the lower level iterators
+%% ('first' and 'next') are safe on ordered_set tables ("Note that for
+%% tables of the ordered_set type, safe_fixtable/2 is not necessary as
+%% calls to first/1 and next/2 will always succeed."), which
+%% rabbit_route is.
+find_routes(MatchHead, Conditions) ->
+ ets:select(rabbit_route, [{MatchHead, Conditions, ['$1']}]).
diff --git a/deps/rabbit/src/rabbit_runtime_parameters.erl b/deps/rabbit/src/rabbit_runtime_parameters.erl
new file mode 100644
index 0000000000..1870b5dfa5
--- /dev/null
+++ b/deps/rabbit/src/rabbit_runtime_parameters.erl
@@ -0,0 +1,412 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_runtime_parameters).
+
+%% Runtime parameters are bits of configuration that are
+%% set, as the name implies, at runtime and not in the config file.
+%%
+%% The benefits of storing some bits of configuration at runtime vary:
+%%
+%% * Some parameters are vhost-specific
+%% * Others are specific to individual nodes
+%% * ...or even queues, exchanges, etc
+%%
+%% The most obvious use case for runtime parameters is policies but
+%% there are others:
+%%
+%% * Plugin-specific parameters that only make sense at runtime,
+%% e.g. Federation and Shovel link settings
+%% * Exchange and queue decorators
+%%
+%% Parameters are grouped by components, e.g. <<"policy">> or <<"shovel">>.
+%% Components are mapped to modules that perform validation.
+%% Runtime parameter values are then looked up by the modules that
+%% need to use them.
+%%
+%% Parameters are stored in Mnesia and can be global. Their changes
+%% are broadcast over rabbit_event.
+%%
+%% Global parameter keys are atoms and their values are JSON documents.
+%%
+%% See also:
+%%
+%% * rabbit_policies
+%% * rabbit_policy
+%% * rabbit_registry
+%% * rabbit_event
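+%%
+%% A minimal usage sketch for global parameters (the parameter name, value and
+%% acting user are illustrative placeholders, assuming a running node):
+%%
+%%   ok = rabbit_runtime_parameters:set_global(cluster_tag, <<"blue">>, <<"admin">>),
+%%   <<"blue">> = rabbit_runtime_parameters:value_global(cluster_tag),
+%%   ok = rabbit_runtime_parameters:clear_global(cluster_tag, <<"admin">>).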
+
+-include("rabbit.hrl").
+
+-export([parse_set/5, set/5, set_any/5, clear/4, clear_any/4, list/0, list/1,
+ list_component/1, list/2, list_formatted/1, list_formatted/3,
+ lookup/3, value/3, value/4, info_keys/0, clear_component/2]).
+
+-export([parse_set_global/3, set_global/3, value_global/1, value_global/2,
+ list_global/0, list_global_formatted/0, list_global_formatted/2,
+ lookup_global/1, global_info_keys/0, clear_global/2]).
+
+%%----------------------------------------------------------------------------
+
+-type ok_or_error_string() :: 'ok' | {'error_string', string()}.
+-type ok_thunk_or_error_string() :: ok_or_error_string() | fun(() -> 'ok').
+
+-spec parse_set(rabbit_types:vhost(), binary(), binary(), string(),
+ rabbit_types:user() | rabbit_types:username() | 'none')
+ -> ok_or_error_string().
+-spec set(rabbit_types:vhost(), binary(), binary(), term(),
+ rabbit_types:user() | rabbit_types:username() | 'none')
+ -> ok_or_error_string().
+-spec set_any(rabbit_types:vhost(), binary(), binary(), term(),
+ rabbit_types:user() | rabbit_types:username() | 'none')
+ -> ok_or_error_string().
+-spec set_global(atom(), term(), rabbit_types:username()) -> 'ok'.
+-spec clear(rabbit_types:vhost(), binary(), binary(), rabbit_types:username())
+ -> ok_thunk_or_error_string().
+-spec clear_any(rabbit_types:vhost(), binary(), binary(), rabbit_types:username())
+ -> ok_thunk_or_error_string().
+-spec list() -> [rabbit_types:infos()].
+-spec list(rabbit_types:vhost() | '_') -> [rabbit_types:infos()].
+-spec list_component(binary()) -> [rabbit_types:infos()].
+-spec list(rabbit_types:vhost() | '_', binary() | '_')
+ -> [rabbit_types:infos()].
+-spec list_formatted(rabbit_types:vhost()) -> [rabbit_types:infos()].
+-spec list_formatted(rabbit_types:vhost(), reference(), pid()) -> 'ok'.
+-spec lookup(rabbit_types:vhost(), binary(), binary())
+ -> rabbit_types:infos() | 'not_found'.
+-spec value(rabbit_types:vhost(), binary(), binary()) -> term().
+-spec value(rabbit_types:vhost(), binary(), binary(), term()) -> term().
+-spec value_global(atom()) -> term() | 'not_found'.
+-spec value_global(atom(), term()) -> term().
+-spec info_keys() -> rabbit_types:info_keys().
+
+%%---------------------------------------------------------------------------
+
+-import(rabbit_misc, [pget/2]).
+
+-define(TABLE, rabbit_runtime_parameters).
+
+%%---------------------------------------------------------------------------
+
+parse_set(_, <<"policy">>, _, _, _) ->
+ {error_string, "policies may not be set using this method"};
+parse_set(VHost, Component, Name, String, User) ->
+ Definition = rabbit_data_coercion:to_binary(String),
+ case rabbit_json:try_decode(Definition) of
+ {ok, Term} when is_map(Term) -> set(VHost, Component, Name, maps:to_list(Term), User);
+ {ok, Term} -> set(VHost, Component, Name, Term, User);
+ {error, Reason} ->
+ {error_string,
+ rabbit_misc:format("JSON decoding error. Reason: ~ts", [Reason])}
+ end.
+
+set(_, <<"policy">>, _, _, _) ->
+ {error_string, "policies may not be set using this method"};
+set(VHost, Component, Name, Term, User) ->
+ set_any(VHost, Component, Name, Term, User).
+
+parse_set_global(Name, String, ActingUser) ->
+ Definition = rabbit_data_coercion:to_binary(String),
+ case rabbit_json:try_decode(Definition) of
+ {ok, Term} when is_map(Term) -> set_global(Name, maps:to_list(Term), ActingUser);
+ {ok, Term} -> set_global(Name, Term, ActingUser);
+ {error, Reason} ->
+ {error_string,
+ rabbit_misc:format("JSON decoding error. Reason: ~ts", [Reason])}
+ end.
+
+set_global(Name, Term, ActingUser) ->
+ NameAsAtom = rabbit_data_coercion:to_atom(Name),
+ rabbit_log:debug("Setting global parameter '~s' to ~p", [NameAsAtom, Term]),
+ mnesia_update(NameAsAtom, Term),
+ event_notify(parameter_set, none, global, [{name, NameAsAtom},
+ {value, Term},
+ {user_who_performed_action, ActingUser}]),
+ ok.
+
+format_error(L) ->
+ {error_string, rabbit_misc:format_many([{"Validation failed~n", []} | L])}.
+
+set_any(VHost, Component, Name, Term, User) ->
+ case set_any0(VHost, Component, Name, Term, User) of
+ ok -> ok;
+ {errors, L} -> format_error(L)
+ end.
+
+set_any0(VHost, Component, Name, Term, User) ->
+ rabbit_log:debug("Asked to set or update runtime parameter '~s' in vhost '~s' "
+ "for component '~s', value: ~p",
+ [Name, VHost, Component, Term]),
+ case lookup_component(Component) of
+ {ok, Mod} ->
+ case flatten_errors(
+ Mod:validate(VHost, Component, Name, Term, get_user(User))) of
+ ok ->
+ case mnesia_update(VHost, Component, Name, Term) of
+ {old, Term} ->
+ ok;
+ _ ->
+ ActingUser = get_username(User),
+ event_notify(
+ parameter_set, VHost, Component,
+ [{name, Name},
+ {value, Term},
+ {user_who_performed_action, ActingUser}]),
+ Mod:notify(VHost, Component, Name, Term, ActingUser)
+ end,
+ ok;
+ E ->
+ E
+ end;
+ E ->
+ E
+ end.
+
+%% Validate only a user record, as expected by the API before #rabbitmq-event-exchange-10
+get_user(#user{} = User) ->
+ User;
+get_user(_) ->
+ none.
+
+get_username(#user{username = Username}) ->
+ Username;
+get_username(none) ->
+ ?INTERNAL_USER;
+get_username(Any) ->
+ Any.
+
+mnesia_update(Key, Term) ->
+ rabbit_misc:execute_mnesia_transaction(mnesia_update_fun(Key, Term)).
+
+mnesia_update(VHost, Comp, Name, Term) ->
+ rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with(VHost, mnesia_update_fun({VHost, Comp, Name}, Term))).
+
+mnesia_update_fun(Key, Term) ->
+ fun () ->
+ Res = case mnesia:read(?TABLE, Key, read) of
+ [] -> new;
+ [Params] -> {old, Params#runtime_parameters.value}
+ end,
+ ok = mnesia:write(?TABLE, c(Key, Term), write),
+ Res
+ end.
+
+clear(_, <<"policy">> , _, _) ->
+ {error_string, "policies may not be cleared using this method"};
+clear(VHost, Component, Name, ActingUser) ->
+ clear_any(VHost, Component, Name, ActingUser).
+
+clear_global(Key, ActingUser) ->
+ KeyAsAtom = rabbit_data_coercion:to_atom(Key),
+ Notify = fun() ->
+ event_notify(parameter_set, none, global,
+ [{name, KeyAsAtom},
+ {user_who_performed_action, ActingUser}]),
+ ok
+ end,
+ case value_global(KeyAsAtom) of
+ not_found ->
+ {error_string, "Parameter does not exist"};
+ _ ->
+ F = fun () ->
+ ok = mnesia:delete(?TABLE, KeyAsAtom, write)
+ end,
+ ok = rabbit_misc:execute_mnesia_transaction(F),
+ case mnesia:is_transaction() of
+ true -> Notify;
+ false -> Notify()
+ end
+ end.
+
+clear_component(Component, ActingUser) ->
+ case list_component(Component) of
+ [] ->
+ ok;
+ Xs ->
+ [clear(pget(vhost, X),
+ pget(component, X),
+ pget(name, X),
+ ActingUser) || X <- Xs],
+ ok
+ end.
+
+clear_any(VHost, Component, Name, ActingUser) ->
+ Notify = fun () ->
+ case lookup_component(Component) of
+ {ok, Mod} -> event_notify(
+ parameter_cleared, VHost, Component,
+ [{name, Name},
+ {user_who_performed_action, ActingUser}]),
+ Mod:notify_clear(VHost, Component, Name, ActingUser);
+ _ -> ok
+ end
+ end,
+ case lookup(VHost, Component, Name) of
+ not_found -> {error_string, "Parameter does not exist"};
+ _ -> mnesia_clear(VHost, Component, Name),
+ case mnesia:is_transaction() of
+ true -> Notify;
+ false -> Notify()
+ end
+ end.
+
+mnesia_clear(VHost, Component, Name) ->
+ F = fun () ->
+ ok = mnesia:delete(?TABLE, {VHost, Component, Name}, write)
+ end,
+ ok = rabbit_misc:execute_mnesia_transaction(rabbit_vhost:with(VHost, F)).
+
+event_notify(_Event, _VHost, <<"policy">>, _Props) ->
+ ok;
+event_notify(Event, none, Component, Props) ->
+ rabbit_event:notify(Event, [{component, Component} | Props]);
+event_notify(Event, VHost, Component, Props) ->
+ rabbit_event:notify(Event, [{vhost, VHost},
+ {component, Component} | Props]).
+
+list() ->
+ [p(P) || #runtime_parameters{ key = {_VHost, Comp, _Name}} = P <-
+ rabbit_misc:dirty_read_all(?TABLE), Comp /= <<"policy">>].
+
+list(VHost) -> list(VHost, '_').
+list_component(Component) -> list('_', Component).
+
+%% Not dirty_match_object since that would not be transactional when used in a
+%% tx context
+list(VHost, Component) ->
+ mnesia:async_dirty(
+ fun () ->
+ case VHost of
+ '_' -> ok;
+ _ -> rabbit_vhost:assert(VHost)
+ end,
+ Match = #runtime_parameters{key = {VHost, Component, '_'},
+ _ = '_'},
+ [p(P) || #runtime_parameters{key = {_VHost, Comp, _Name}} = P <-
+ mnesia:match_object(?TABLE, Match, read),
+ Comp =/= <<"policy">> orelse Component =:= <<"policy">>]
+ end).
+
+list_global() ->
+ %% list only atom keys
+ mnesia:async_dirty(
+ fun () ->
+ Match = #runtime_parameters{key = '_', _ = '_'},
+ [p(P) || P <- mnesia:match_object(?TABLE, Match, read),
+ is_atom(P#runtime_parameters.key)]
+ end).
+
+list_formatted(VHost) ->
+ [ format_parameter(info_keys(), P) || P <- list(VHost) ].
+
+format_parameter(InfoKeys, P) ->
+ lists:foldr(fun
+ (value, Acc) ->
+ [{value, rabbit_json:encode(pget(value, P))} | Acc];
+ (Key, Acc) ->
+ case lists:keyfind(Key, 1, P) of
+ false -> Acc;
+ {Key, Val} -> [{Key, Val} | Acc]
+ end
+ end,
+ [], InfoKeys).
+
+list_formatted(VHost, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref,
+ fun(P) -> format_parameter(info_keys(), P) end, list(VHost)).
+
+list_global_formatted() ->
+ [ format_parameter(global_info_keys(), P) || P <- list_global() ].
+
+list_global_formatted(Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref,
+ fun(P) -> format_parameter(global_info_keys(), P) end, list_global()).
+
+lookup(VHost, Component, Name) ->
+ case lookup0({VHost, Component, Name}, rabbit_misc:const(not_found)) of
+ not_found -> not_found;
+ Params -> p(Params)
+ end.
+
+lookup_global(Name) ->
+ case lookup0(Name, rabbit_misc:const(not_found)) of
+ not_found -> not_found;
+ Params -> p(Params)
+ end.
+
+value(VHost, Comp, Name) -> value0({VHost, Comp, Name}).
+value(VHost, Comp, Name, Def) -> value0({VHost, Comp, Name}, Def).
+
+value_global(Key) ->
+ value0(Key).
+
+value_global(Key, Default) ->
+ value0(Key, Default).
+
+value0(Key) ->
+ case lookup0(Key, rabbit_misc:const(not_found)) of
+ not_found -> not_found;
+ Params -> Params#runtime_parameters.value
+ end.
+
+value0(Key, Default) ->
+ Params = lookup0(Key, fun () -> lookup_missing(Key, Default) end),
+ Params#runtime_parameters.value.
+
+lookup0(Key, DefaultFun) ->
+ case mnesia:dirty_read(?TABLE, Key) of
+ [] -> DefaultFun();
+ [R] -> R
+ end.
+
+lookup_missing(Key, Default) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:read(?TABLE, Key, read) of
+ [] -> Record = c(Key, Default),
+ mnesia:write(?TABLE, Record, write),
+ Record;
+ [R] -> R
+ end
+ end).
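+
+%% Note: value0/2 falls back to lookup_missing/2, which writes the supplied
+%% default as a new #runtime_parameters{} record inside an mnesia transaction
+%% when no value exists yet, so subsequent reads observe the same default.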
+
+c(Key, Default) ->
+ #runtime_parameters{key = Key,
+ value = Default}.
+
+p(#runtime_parameters{key = {VHost, Component, Name}, value = Value}) ->
+ [{vhost, VHost},
+ {component, Component},
+ {name, Name},
+ {value, Value}];
+
+p(#runtime_parameters{key = Key, value = Value}) when is_atom(Key) ->
+ [{name, Key},
+ {value, Value}].
+
+info_keys() -> [component, name, value].
+
+global_info_keys() -> [name, value].
+
+%%---------------------------------------------------------------------------
+
+lookup_component(Component) ->
+ case rabbit_registry:lookup_module(
+ runtime_parameter, list_to_atom(binary_to_list(Component))) of
+ {error, not_found} -> {errors,
+ [{"component ~s not found", [Component]}]};
+ {ok, Module} -> {ok, Module}
+ end.
+
+flatten_errors(L) ->
+ case [{F, A} || I <- lists:flatten([L]), {error, F, A} <- [I]] of
+ [] -> ok;
+ E -> {errors, E}
+ end.
diff --git a/deps/rabbit/src/rabbit_ssl.erl b/deps/rabbit/src/rabbit_ssl.erl
new file mode 100644
index 0000000000..84670b0a19
--- /dev/null
+++ b/deps/rabbit/src/rabbit_ssl.erl
@@ -0,0 +1,195 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_ssl).
+
+-include_lib("public_key/include/public_key.hrl").
+
+-export([peer_cert_issuer/1, peer_cert_subject/1, peer_cert_validity/1]).
+-export([peer_cert_subject_items/2, peer_cert_auth_name/1]).
+-export([cipher_suites_erlang/2, cipher_suites_erlang/1,
+ cipher_suites_openssl/2, cipher_suites_openssl/1,
+ cipher_suites/1]).
+
+%%--------------------------------------------------------------------------
+
+-export_type([certificate/0]).
+
+% Due to API differences between OTP releases.
+-dialyzer(no_missing_calls).
+-ignore_xref([{ssl_cipher_format, suite_legacy, 1},
+ {ssl_cipher_format, suite, 1},
+ {ssl_cipher_format, suite_to_str, 1},
+ {ssl_cipher_format, erl_suite_definition, 1},
+ {ssl_cipher_format, suite_map_to_openssl_str, 1},
+ {ssl_cipher_format, suite_map_to_bin, 1}]).
+
+-type certificate() :: rabbit_cert_info:certificate().
+
+-type cipher_suites_mode() :: default | all | anonymous.
+
+-spec cipher_suites(cipher_suites_mode()) -> ssl:ciphers().
+cipher_suites(Mode) ->
+ Version = get_highest_protocol_version(),
+ ssl:cipher_suites(Mode, Version).
+
+-spec cipher_suites_erlang(cipher_suites_mode()) ->
+ [ssl:old_cipher_suite()].
+cipher_suites_erlang(Mode) ->
+ Version = get_highest_protocol_version(),
+ cipher_suites_erlang(Mode, Version).
+
+-spec cipher_suites_erlang(cipher_suites_mode(),
+ ssl:protocol_version() | tls_record:tls_version()) ->
+ [ssl:old_cipher_suite()].
+cipher_suites_erlang(Mode, Version) ->
+ [ format_cipher_erlang(C)
+ || C <- ssl:cipher_suites(Mode, Version) ].
+
+-spec cipher_suites_openssl(cipher_suites_mode()) ->
+ [ssl:old_cipher_suite()].
+cipher_suites_openssl(Mode) ->
+ Version = get_highest_protocol_version(),
+ cipher_suites_openssl(Mode, Version).
+
+-spec cipher_suites_openssl(cipher_suites_mode(),
+ ssl:protocol_version() | tls_record:tls_version()) ->
+ [ssl:old_cipher_suite()].
+cipher_suites_openssl(Mode, Version) ->
+ lists:filtermap(fun(C) ->
+ OpenSSL = format_cipher_openssl(C),
+ case is_list(OpenSSL) of
+ true -> {true, OpenSSL};
+ false -> false
+ end
+ end,
+ ssl:cipher_suites(Mode, Version)).
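+%% Only suites for which format_cipher_openssl/1 yields a string (an
+%% OpenSSL-style name) are kept; anything else is dropped by the
+%% lists:filtermap/2 call above.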
+
+
+format_cipher_erlang(Cipher) ->
+ case erlang:function_exported(ssl_cipher_format, suite_map_to_bin, 1) of
+ true ->
+ format_cipher_erlang22(Cipher);
+ false ->
+ format_cipher_erlang21(Cipher)
+ end.
+
+format_cipher_erlang22(Cipher) ->
+ ssl_cipher_format:suite_legacy(ssl_cipher_format:suite_map_to_bin(Cipher)).
+
+format_cipher_erlang21(Cipher) ->
+ ssl_cipher_format:erl_suite_definition(ssl_cipher_format:suite(Cipher)).
+
+
+format_cipher_openssl(Cipher) ->
+ case erlang:function_exported(ssl_cipher_format, suite_map_to_bin, 1) of
+ true ->
+ format_cipher_openssl22(Cipher);
+ false ->
+ format_cipher_openssl21(Cipher)
+ end.
+
+format_cipher_openssl22(Cipher) ->
+ ssl_cipher_format:suite_map_to_openssl_str(Cipher).
+
+format_cipher_openssl21(Cipher) ->
+ ssl_cipher_format:suite_to_str(Cipher).
+
+-spec get_highest_protocol_version() -> tls_record:tls_atom_version().
+get_highest_protocol_version() ->
+ tls_record:protocol_version(
+ tls_record:highest_protocol_version([])).
+
+%%--------------------------------------------------------------------------
+%% High-level functions used by reader
+%%--------------------------------------------------------------------------
+
+%% Return a string describing the certificate's issuer.
+peer_cert_issuer(Cert) ->
+ rabbit_cert_info:issuer(Cert).
+
+%% Return a string describing the certificate's subject, as per RFC4514.
+peer_cert_subject(Cert) ->
+ rabbit_cert_info:subject(Cert).
+
+%% Return the parts of the certificate's subject.
+peer_cert_subject_items(Cert, Type) ->
+ rabbit_cert_info:subject_items(Cert, Type).
+
+%% Filters certificate SAN extensions by (OTP) SAN type name.
+peer_cert_subject_alternative_names(Cert, Type) ->
+ SANs = rabbit_cert_info:subject_alternative_names(Cert),
+ lists:filter(fun({Key, _}) -> Key =:= Type end, SANs).
+
+%% Return a string describing the certificate's validity.
+peer_cert_validity(Cert) ->
+ rabbit_cert_info:validity(Cert).
+
+%% Extract a username from the certificate
+-spec peer_cert_auth_name
+ (certificate()) -> binary() | 'not_found' | 'unsafe'.
+
+peer_cert_auth_name(Cert) ->
+ {ok, Mode} = application:get_env(rabbit, ssl_cert_login_from),
+ peer_cert_auth_name(Mode, Cert).
+
+peer_cert_auth_name(distinguished_name, Cert) ->
+ case auth_config_sane() of
+ true -> iolist_to_binary(peer_cert_subject(Cert));
+ false -> unsafe
+ end;
+
+peer_cert_auth_name(subject_alt_name, Cert) ->
+ peer_cert_auth_name(subject_alternative_name, Cert);
+
+peer_cert_auth_name(subject_alternative_name, Cert) ->
+ case auth_config_sane() of
+ true ->
+ Type = application:get_env(rabbit, ssl_cert_login_san_type, dns),
+ %% lists:nth/2 is 1-based
+ Index = application:get_env(rabbit, ssl_cert_login_san_index, 0) + 1,
+ OfType = peer_cert_subject_alternative_names(Cert, otp_san_type(Type)),
+ rabbit_log:debug("Peer certificate SANs of type ~s: ~p, index to use with lists:nth/2: ~b", [Type, OfType, Index]),
+ case length(OfType) of
+ 0 -> not_found;
+ N when N < Index -> not_found;
+ N when N >= Index ->
+ {_, Value} = lists:nth(Index, OfType),
+ rabbit_data_coercion:to_binary(Value)
+ end;
+ false -> unsafe
+ end;
+
+peer_cert_auth_name(common_name, Cert) ->
+ %% If there is more than one CN then we join them with "," in a
+ %% vaguely DN-like way. This is mostly so we do something more
+ %% sensible than crashing; if you actually want things escaped
+ %% properly etc., use DN mode.
+ case auth_config_sane() of
+ true -> case peer_cert_subject_items(Cert, ?'id-at-commonName') of
+ not_found -> not_found;
+ CNs -> list_to_binary(string:join(CNs, ","))
+ end;
+ false -> unsafe
+ end.
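+
+%% Illustration (hypothetical configuration, not part of this change): with
+%%   {rabbit, [{ssl_cert_login_from, subject_alternative_name},
+%%             {ssl_cert_login_san_type, dns},
+%%             {ssl_cert_login_san_index, 0}]}
+%% the username is taken from the first dNSName SAN of the peer certificate;
+%% the 0-based ssl_cert_login_san_index is converted to the 1-based index
+%% expected by lists:nth/2 above.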
+
+auth_config_sane() ->
+ {ok, Opts} = application:get_env(rabbit, ssl_options),
+ case proplists:get_value(verify, Opts) of
+ verify_peer -> true;
+ V -> rabbit_log:warning("TLS peer verification (authentication) is "
+ "disabled, ssl_options.verify value used: ~p. "
+ "See https://www.rabbitmq.com/ssl.html#peer-verification to learn more.", [V]),
+ false
+ end.
+
+otp_san_type(dns) -> dNSName;
+otp_san_type(ip) -> iPAddress;
+otp_san_type(email) -> rfc822Name;
+otp_san_type(uri) -> uniformResourceIdentifier;
+otp_san_type(other_name) -> otherName;
+otp_san_type(Other) -> Other.
diff --git a/deps/rabbit/src/rabbit_stream_coordinator.erl b/deps/rabbit/src/rabbit_stream_coordinator.erl
new file mode 100644
index 0000000000..9e4890c894
--- /dev/null
+++ b/deps/rabbit/src/rabbit_stream_coordinator.erl
@@ -0,0 +1,949 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_stream_coordinator).
+
+-behaviour(ra_machine).
+
+-export([start/0]).
+-export([format_ra_event/2]).
+
+-export([init/1,
+ apply/3,
+ state_enter/2,
+ init_aux/1,
+ handle_aux/6,
+ tick/2]).
+
+-export([recover/0,
+ start_cluster/1,
+ delete_cluster/2,
+ add_replica/2,
+ delete_replica/2]).
+
+-export([policy_changed/1]).
+
+-export([phase_repair_mnesia/2,
+ phase_start_cluster/1,
+ phase_delete_cluster/2,
+ phase_check_quorum/1,
+ phase_start_new_leader/1,
+ phase_stop_replicas/1,
+ phase_start_replica/3,
+ phase_delete_replica/2]).
+
+-export([log_overview/1]).
+
+-define(STREAM_COORDINATOR_STARTUP, {stream_coordinator_startup, self()}).
+-define(TICK_TIMEOUT, 60000).
+-define(RESTART_TIMEOUT, 1000).
+-define(PHASE_RETRY_TIMEOUT, 10000).
+-define(CMD_TIMEOUT, 30000).
+
+-record(?MODULE, {streams, monitors}).
+
+start() ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ ServerId = {?MODULE, node()},
+ case ra:restart_server(ServerId) of
+ {error, Reason} when Reason == not_started orelse
+ Reason == name_not_registered ->
+ case ra:start_server(make_ra_conf(node(), Nodes)) of
+ ok ->
+ global:set_lock(?STREAM_COORDINATOR_STARTUP),
+ case find_members(Nodes) of
+ [] ->
+ %% We're the first (and maybe only) one
+ ra:trigger_election(ServerId);
+ Members ->
+ %% What to do if we get a timeout?
+ {ok, _, _} = ra:add_member(Members, ServerId, 30000)
+ end,
+ global:del_lock(?STREAM_COORDINATOR_STARTUP),
+ _ = ra:members(ServerId),
+ ok;
+ Error ->
+ exit(Error)
+ end;
+ ok ->
+ ok;
+ Error ->
+ exit(Error)
+ end.
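+
+%% Note: start/0 takes a global lock so that concurrently booting nodes do
+%% not race when forming the coordinator cluster; the first node to find no
+%% existing members triggers an election, while later nodes join the
+%% existing Ra cluster via ra:add_member/3.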
+
+find_members([]) ->
+ [];
+find_members([Node | Nodes]) ->
+ case ra:members({?MODULE, Node}) of
+ {_, Members, _} ->
+ Members;
+ {error, noproc} ->
+ find_members(Nodes);
+ {timeout, _} ->
+ %% not sure what to do here
+ find_members(Nodes)
+ end.
+
+recover() ->
+ ra:restart_server({?MODULE, node()}).
+
+start_cluster(Q) ->
+ process_command({start_cluster, #{queue => Q}}).
+
+delete_cluster(StreamId, ActingUser) ->
+ process_command({delete_cluster, #{stream_id => StreamId, acting_user => ActingUser}}).
+
+add_replica(StreamId, Node) ->
+ process_command({start_replica, #{stream_id => StreamId, node => Node,
+ retries => 1}}).
+
+policy_changed(StreamId) ->
+ process_command({policy_changed, #{stream_id => StreamId}}).
+
+delete_replica(StreamId, Node) ->
+ process_command({delete_replica, #{stream_id => StreamId, node => Node}}).
+
+process_command(Cmd) ->
+ global:set_lock(?STREAM_COORDINATOR_STARTUP),
+ Servers = ensure_coordinator_started(),
+ global:del_lock(?STREAM_COORDINATOR_STARTUP),
+ process_command(Servers, Cmd).
+
+process_command([], _Cmd) ->
+ {error, coordinator_unavailable};
+process_command([Server | Servers], {CmdName, _} = Cmd) ->
+ case ra:process_command(Server, Cmd, ?CMD_TIMEOUT) of
+ {timeout, _} ->
+ rabbit_log:warning("Coordinator timeout on server ~p when processing command ~p",
+ [Server, CmdName]),
+ process_command(Servers, Cmd);
+ {error, noproc} ->
+ process_command(Servers, Cmd);
+ Reply ->
+ Reply
+ end.
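+
+%% Note: commands are tried against each known coordinator member in turn;
+%% a timeout or noproc from one server falls through to the next, and only
+%% when the list is exhausted is {error, coordinator_unavailable} returned.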
+
+ensure_coordinator_started() ->
+ Local = {?MODULE, node()},
+ AllNodes = all_nodes(),
+ case ra:restart_server(Local) of
+ {error, Reason} when Reason == not_started orelse
+ Reason == name_not_registered ->
+ OtherNodes = all_nodes() -- [Local],
+ %% We can't use find_members/1 here, as a process that times out means the cluster is up
+ case lists:filter(fun(N) -> global:whereis_name(N) =/= undefined end, OtherNodes) of
+ [] ->
+ start_coordinator_cluster();
+ _ ->
+ OtherNodes
+ end;
+ ok ->
+ AllNodes;
+ {error, {already_started, _}} ->
+ AllNodes;
+ _ ->
+ AllNodes
+ end.
+
+start_coordinator_cluster() ->
+ Nodes = rabbit_mnesia:cluster_nodes(running),
+ case ra:start_cluster([make_ra_conf(Node, Nodes) || Node <- Nodes]) of
+ {ok, Started, _} ->
+ Started;
+ {error, cluster_not_formed} ->
+ rabbit_log:warning("Stream coordinator cluster not formed", []),
+ []
+ end.
+
+all_nodes() ->
+ Nodes = rabbit_mnesia:cluster_nodes(running) -- [node()],
+ [{?MODULE, Node} || Node <- [node() | Nodes]].
+
+init(_Conf) ->
+ #?MODULE{streams = #{},
+ monitors = #{}}.
+
+apply(#{from := From}, {policy_changed, #{stream_id := StreamId}} = Cmd,
+ #?MODULE{streams = Streams0} = State) ->
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ {State, ok, []};
+ #{conf := Conf,
+ state := running} ->
+ case rabbit_stream_queue:update_stream_conf(Conf) of
+ Conf ->
+ %% No changes; make sure we only trigger an election when strictly necessary
+ {State, ok, []};
+ _ ->
+ {State, ok, [{mod_call, osiris_writer, stop, [Conf]}]}
+ end;
+ SState0 ->
+ Streams = maps:put(StreamId, add_pending_cmd(From, Cmd, SState0), Streams0),
+ {State#?MODULE{streams = Streams}, '$ra_no_reply', []}
+
+ end;
+apply(#{from := From}, {start_cluster, #{queue := Q}}, #?MODULE{streams = Streams} = State) ->
+ #{name := StreamId} = Conf0 = amqqueue:get_type_state(Q),
+ Conf = apply_leader_locator_strategy(Conf0, Streams),
+ case maps:is_key(StreamId, Streams) of
+ true ->
+ {State, '$ra_no_reply', wrap_reply(From, {error, already_started})};
+ false ->
+ Phase = phase_start_cluster,
+ PhaseArgs = [amqqueue:set_type_state(Q, Conf)],
+ SState = #{state => start_cluster,
+ phase => Phase,
+ phase_args => PhaseArgs,
+ conf => Conf,
+ reply_to => From,
+ pending_cmds => [],
+ pending_replicas => []},
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering phase_start_cluster", [StreamId]),
+ {State#?MODULE{streams = maps:put(StreamId, SState, Streams)}, '$ra_no_reply',
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}]}
+ end;
+apply(_Meta, {start_cluster_reply, Q}, #?MODULE{streams = Streams,
+ monitors = Monitors0} = State) ->
+ #{name := StreamId,
+ leader_pid := LeaderPid,
+ replica_pids := ReplicaPids} = Conf = amqqueue:get_type_state(Q),
+ SState0 = maps:get(StreamId, Streams),
+ Phase = phase_repair_mnesia,
+ PhaseArgs = [new, Q],
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs},
+ Monitors = lists:foldl(fun(Pid, M) ->
+ maps:put(Pid, {StreamId, follower}, M)
+ end, maps:put(LeaderPid, {StreamId, leader}, Monitors0), ReplicaPids),
+ MonitorActions = [{monitor, process, Pid} || Pid <- ReplicaPids ++ [LeaderPid]],
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p "
+ "after start_cluster_reply", [StreamId, Phase]),
+ {State#?MODULE{streams = maps:put(StreamId, SState, Streams),
+ monitors = Monitors}, ok,
+ MonitorActions ++ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+apply(_Meta, {start_replica_failed, StreamId, Node, Retries, Reply},
+ #?MODULE{streams = Streams0} = State) ->
+ rabbit_log:debug("rabbit_stream_coordinator: ~p start replica failed", [StreamId]),
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ {State, {error, not_found}, []};
+ #{pending_replicas := Pending,
+ reply_to := From} = SState ->
+ Streams = Streams0#{StreamId => clear_stream_state(SState#{pending_replicas =>
+ add_unique(Node, Pending)})},
+ reply_and_run_pending(
+ From, StreamId, ok, Reply,
+ [{timer, {pipeline,
+ [{start_replica, #{stream_id => StreamId,
+ node => Node,
+ from => undefined,
+ retries => Retries + 1}}]},
+ ?RESTART_TIMEOUT * Retries}],
+ State#?MODULE{streams = Streams})
+ end;
+apply(_Meta, {phase_finished, StreamId, Reply}, #?MODULE{streams = Streams0} = State) ->
+ rabbit_log:debug("rabbit_stream_coordinator: ~p phase finished", [StreamId]),
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ {State, {error, not_found}, []};
+ #{reply_to := From} = SState ->
+ Streams = Streams0#{StreamId => clear_stream_state(SState)},
+ reply_and_run_pending(From, StreamId, ok, Reply, [], State#?MODULE{streams = Streams})
+ end;
+apply(#{from := From}, {start_replica, #{stream_id := StreamId, node := Node,
+ retries := Retries}} = Cmd,
+ #?MODULE{streams = Streams0} = State) ->
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ case From of
+ undefined ->
+ {State, ok, []};
+ _ ->
+ {State, '$ra_no_reply', wrap_reply(From, {error, not_found})}
+ end;
+ #{conf := Conf,
+ state := running} = SState0 ->
+ Phase = phase_start_replica,
+ PhaseArgs = [Node, Conf, Retries],
+ SState = update_stream_state(From, start_replica, Phase, PhaseArgs, SState0),
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p on node ~p",
+ [StreamId, Phase, Node]),
+ {State#?MODULE{streams = Streams0#{StreamId => SState}}, '$ra_no_reply',
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+ SState0 ->
+ Streams = maps:put(StreamId, add_pending_cmd(From, Cmd, SState0), Streams0),
+ {State#?MODULE{streams = Streams}, '$ra_no_reply', []}
+ end;
+apply(_Meta, {start_replica_reply, StreamId, Pid},
+ #?MODULE{streams = Streams, monitors = Monitors0} = State) ->
+ case maps:get(StreamId, Streams, undefined) of
+ undefined ->
+ {State, {error, not_found}, []};
+ #{conf := Conf0} = SState0 ->
+ #{replica_nodes := Replicas0,
+ replica_pids := ReplicaPids0} = Conf0,
+ {ReplicaPids, MaybePid} = delete_replica_pid(node(Pid), ReplicaPids0),
+ Conf = Conf0#{replica_pids => [Pid | ReplicaPids],
+ replica_nodes => add_unique(node(Pid), Replicas0)},
+ Phase = phase_repair_mnesia,
+ PhaseArgs = [update, Conf],
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p after start replica", [StreamId, Phase]),
+ #{pending_replicas := Pending} = SState0 = maps:get(StreamId, Streams),
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs,
+ pending_replicas => lists:delete(node(Pid), Pending)},
+ Monitors1 = Monitors0#{Pid => {StreamId, follower}},
+ Monitors = case MaybePid of
+ [P] -> maps:remove(P, Monitors1);
+ _ -> Monitors1
+ end,
+ {State#?MODULE{streams = Streams#{StreamId => SState},
+ monitors = Monitors}, ok,
+ [{monitor, process, Pid}, {aux, {phase, StreamId, Phase, PhaseArgs}}]}
+ end;
+apply(#{from := From}, {delete_replica, #{stream_id := StreamId, node := Node}} = Cmd,
+ #?MODULE{streams = Streams0,
+ monitors = Monitors0} = State) ->
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ {State, '$ra_no_reply', wrap_reply(From, {error, not_found})};
+ #{conf := Conf0,
+ state := running,
+ pending_replicas := Pending0} = SState0 ->
+ Replicas0 = maps:get(replica_nodes, Conf0),
+ ReplicaPids0 = maps:get(replica_pids, Conf0),
+ case lists:member(Node, Replicas0) of
+ false ->
+ reply_and_run_pending(From, StreamId, '$ra_no_reply', ok, [], State);
+ true ->
+ [Pid] = lists:filter(fun(P) -> node(P) == Node end, ReplicaPids0),
+ ReplicaPids = lists:delete(Pid, ReplicaPids0),
+ Replicas = lists:delete(Node, Replicas0),
+ Pending = lists:delete(Node, Pending0),
+ Conf = Conf0#{replica_pids => ReplicaPids,
+ replica_nodes => Replicas},
+ Phase = phase_delete_replica,
+ PhaseArgs = [Node, Conf],
+ SState = update_stream_state(From, delete_replica,
+ Phase, PhaseArgs,
+ SState0#{conf => Conf0,
+ pending_replicas => Pending}),
+ Monitors = maps:remove(Pid, Monitors0),
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p on node ~p", [StreamId, Phase, Node]),
+ {State#?MODULE{monitors = Monitors,
+ streams = Streams0#{StreamId => SState}},
+ '$ra_no_reply',
+ [{demonitor, process, Pid},
+ {aux, {phase, StreamId, Phase, PhaseArgs}}]}
+ end;
+ SState0 ->
+ Streams = maps:put(StreamId, add_pending_cmd(From, Cmd, SState0), Streams0),
+ {State#?MODULE{streams = Streams}, '$ra_no_reply', []}
+ end;
+apply(#{from := From}, {delete_cluster, #{stream_id := StreamId,
+ acting_user := ActingUser}} = Cmd,
+ #?MODULE{streams = Streams0, monitors = Monitors0} = State) ->
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ {State, '$ra_no_reply', wrap_reply(From, {ok, 0})};
+ #{conf := Conf,
+ state := running} = SState0 ->
+ ReplicaPids = maps:get(replica_pids, Conf),
+ LeaderPid = maps:get(leader_pid, Conf),
+ Monitors = lists:foldl(fun(Pid, M) ->
+ maps:remove(Pid, M)
+ end, Monitors0, ReplicaPids ++ [LeaderPid]),
+ Phase = phase_delete_cluster,
+ PhaseArgs = [Conf, ActingUser],
+ SState = update_stream_state(From, delete_cluster, Phase, PhaseArgs, SState0),
+ Demonitors = [{demonitor, process, Pid} || Pid <- [LeaderPid | ReplicaPids]],
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p",
+ [StreamId, Phase]),
+ {State#?MODULE{monitors = Monitors,
+ streams = Streams0#{StreamId => SState}}, '$ra_no_reply',
+ Demonitors ++ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+ SState0 ->
+ Streams = maps:put(StreamId, add_pending_cmd(From, Cmd, SState0), Streams0),
+ {State#?MODULE{streams = Streams}, '$ra_no_reply', []}
+ end;
+apply(_Meta, {delete_cluster_reply, StreamId}, #?MODULE{streams = Streams} = State0) ->
+ #{reply_to := From,
+ pending_cmds := Pending} = maps:get(StreamId, Streams),
+ State = State0#?MODULE{streams = maps:remove(StreamId, Streams)},
+ rabbit_log:debug("rabbit_stream_coordinator: ~p finished delete_cluster_reply",
+ [StreamId]),
+ Actions = [{ra, pipeline_command, [{?MODULE, node()}, Cmd]} || Cmd <- Pending],
+ {State, ok, Actions ++ wrap_reply(From, {ok, 0})};
+apply(_Meta, {down, Pid, _Reason} = Cmd, #?MODULE{streams = Streams,
+ monitors = Monitors0} = State) ->
+ case maps:get(Pid, Monitors0, undefined) of
+ {StreamId, Role} ->
+ Monitors = maps:remove(Pid, Monitors0),
+ case maps:get(StreamId, Streams, undefined) of
+ #{state := delete_cluster} ->
+ {State#?MODULE{monitors = Monitors}, ok, []};
+ undefined ->
+ {State#?MODULE{monitors = Monitors}, ok, []};
+ #{state := running,
+ conf := #{replica_pids := Pids} = Conf0,
+ pending_cmds := Pending0} = SState0 ->
+ case Role of
+ leader ->
+ rabbit_log:info("rabbit_stream_coordinator: ~p leader is down, starting election", [StreamId]),
+ Phase = phase_stop_replicas,
+ PhaseArgs = [Conf0],
+ SState = update_stream_state(undefined, leader_election, Phase, PhaseArgs, SState0),
+ Events = [{demonitor, process, P} || P <- Pids],
+ Monitors1 = lists:foldl(fun(P, M) ->
+ maps:remove(P, M)
+ end, Monitors, Pids),
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p", [StreamId, Phase]),
+ {State#?MODULE{monitors = Monitors1,
+ streams = Streams#{StreamId => SState}},
+ ok, Events ++ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+ follower ->
+ case rabbit_misc:is_process_alive(maps:get(leader_pid, Conf0)) of
+ true ->
+ Phase = phase_start_replica,
+ PhaseArgs = [node(Pid), Conf0, 1],
+ SState = update_stream_state(undefined,
+ replica_restart,
+ Phase, PhaseArgs,
+ SState0),
+ rabbit_log:debug("rabbit_stream_coordinator: ~p replica on node ~p is down, entering ~p", [StreamId, node(Pid), Phase]),
+ {State#?MODULE{monitors = Monitors,
+ streams = Streams#{StreamId => SState}},
+ ok, [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+ false ->
+ SState = SState0#{pending_cmds => Pending0 ++ [Cmd]},
+ reply_and_run_pending(undefined, StreamId, ok, ok, [], State#?MODULE{streams = Streams#{StreamId => SState}})
+ end
+ end;
+ #{pending_cmds := Pending0} = SState0 ->
+ SState = SState0#{pending_cmds => Pending0 ++ [Cmd]},
+ {State#?MODULE{streams = Streams#{StreamId => SState}}, ok, []}
+ end;
+ undefined ->
+ {State, ok, []}
+ end;
+apply(_Meta, {start_leader_election, StreamId, NewEpoch, Offsets},
+ #?MODULE{streams = Streams} = State) ->
+ #{conf := Conf0} = SState0 = maps:get(StreamId, Streams),
+ #{leader_node := Leader,
+ replica_nodes := Replicas,
+ replica_pids := ReplicaPids0} = Conf0,
+ NewLeader = find_max_offset(Offsets),
+ rabbit_log:info("rabbit_stream_coordinator: ~p starting new leader on node ~p",
+ [StreamId, NewLeader]),
+ {ReplicaPids, _} = delete_replica_pid(NewLeader, ReplicaPids0),
+ Conf = rabbit_stream_queue:update_stream_conf(
+ Conf0#{epoch => NewEpoch,
+ leader_node => NewLeader,
+ replica_nodes => lists:delete(NewLeader, Replicas ++ [Leader]),
+ replica_pids => ReplicaPids}),
+ Phase = phase_start_new_leader,
+ PhaseArgs = [Conf],
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs},
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering phase_start_new_leader",
+ [StreamId]),
+ {State#?MODULE{streams = Streams#{StreamId => SState}}, ok,
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+apply(_Meta, {leader_elected, StreamId, NewLeaderPid},
+ #?MODULE{streams = Streams, monitors = Monitors0} = State) ->
+ rabbit_log:info("rabbit_stream_coordinator: ~p leader elected", [StreamId]),
+ #{conf := Conf0,
+ pending_cmds := Pending0} = SState0 = maps:get(StreamId, Streams),
+ #{leader_pid := LeaderPid,
+ replica_nodes := Replicas} = Conf0,
+ Conf = Conf0#{leader_pid => NewLeaderPid},
+ Phase = phase_repair_mnesia,
+ PhaseArgs = [update, Conf],
+ Pending = Pending0 ++ [{start_replica, #{stream_id => StreamId, node => R,
+ retries => 1, from => undefined}}
+ || R <- Replicas],
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs,
+ pending_replicas => Replicas,
+ pending_cmds => Pending},
+ Monitors = maps:put(NewLeaderPid, {StreamId, leader}, maps:remove(LeaderPid, Monitors0)),
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p after "
+ "leader election", [StreamId, Phase]),
+ {State#?MODULE{streams = Streams#{StreamId => SState},
+ monitors = Monitors}, ok,
+ [{monitor, process, NewLeaderPid},
+ {aux, {phase, StreamId, Phase, PhaseArgs}}]};
+apply(_Meta, {replicas_stopped, StreamId}, #?MODULE{streams = Streams} = State) ->
+ case maps:get(StreamId, Streams, undefined) of
+ undefined ->
+ {State, {error, not_found}, []};
+ #{conf := Conf0} = SState0 ->
+ Phase = phase_check_quorum,
+ Conf = Conf0#{replica_pids => []},
+ PhaseArgs = [Conf],
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs},
+ rabbit_log:info("rabbit_stream_coordinator: ~p all replicas have been stopped, "
+ "checking quorum available", [StreamId]),
+ {State#?MODULE{streams = Streams#{StreamId => SState}}, ok,
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}]}
+ end;
+apply(_Meta, {stream_updated, #{name := StreamId} = Conf}, #?MODULE{streams = Streams} = State) ->
+ SState0 = maps:get(StreamId, Streams),
+ Phase = phase_repair_mnesia,
+ PhaseArgs = [update, Conf],
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs},
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p after"
+ " stream_updated", [StreamId, Phase]),
+ {State#?MODULE{streams = Streams#{StreamId => SState}}, ok,
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+apply(_, {timeout, {pipeline, Cmds}}, State) ->
+ Actions = [{mod_call, ra, pipeline_command, [{?MODULE, node()}, Cmd]} || Cmd <- Cmds],
+ {State, ok, Actions};
+apply(_, {timeout, {aux, Cmd}}, State) ->
+ {State, ok, [{aux, Cmd}]};
+apply(Meta, {_, #{from := From}} = Cmd, State) ->
+ ?MODULE:apply(Meta#{from => From}, Cmd, State).
+
+state_enter(leader, #?MODULE{streams = Streams, monitors = Monitors}) ->
+ maps:fold(fun(_, #{conf := #{name := StreamId},
+ pending_replicas := Pending,
+ state := State,
+ phase := Phase,
+ phase_args := PhaseArgs}, Acc) ->
+ restart_aux_phase(State, Phase, PhaseArgs, StreamId) ++
+ pipeline_restart_replica_cmds(StreamId, Pending) ++
+ Acc
+ end, [{monitor, process, P} || P <- maps:keys(Monitors)], Streams);
+state_enter(follower, #?MODULE{monitors = Monitors}) ->
+ [{monitor, process, P} || P <- maps:keys(Monitors)];
+state_enter(recover, _) ->
+ put('$rabbit_vm_category', ?MODULE),
+ [];
+state_enter(_, _) ->
+ [].
+
+restart_aux_phase(running, _, _, _) ->
+ [];
+restart_aux_phase(_State, Phase, PhaseArgs, StreamId) ->
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}].
+
+pipeline_restart_replica_cmds(StreamId, Pending) ->
+ [{timer, {pipeline, [{start_replica, #{stream_id => StreamId,
+ node => Node,
+ from => undefined,
+ retries => 1}}
+ || Node <- Pending]}, ?RESTART_TIMEOUT}].
+
+tick(_Ts, _State) ->
+ [{aux, maybe_resize_coordinator_cluster}].
+
+maybe_resize_coordinator_cluster() ->
+ spawn(fun() ->
+ case ra:members({?MODULE, node()}) of
+ {_, Members, _} ->
+ MemberNodes = [Node || {_, Node} <- Members],
+ Running = rabbit_mnesia:cluster_nodes(running),
+ All = rabbit_mnesia:cluster_nodes(all),
+ case Running -- MemberNodes of
+ [] ->
+ ok;
+ New ->
+ rabbit_log:warning("New rabbit node(s) detected, "
+ "adding stream coordinator in: ~p", [New]),
+ add_members(Members, New)
+ end,
+ case MemberNodes -- All of
+ [] ->
+ ok;
+ Old ->
+ rabbit_log:warning("Rabbit node(s) removed from the cluster, "
+ "deleting stream coordinator in: ~p", [Old]),
+ remove_members(Members, Old)
+ end;
+ _ ->
+ ok
+ end
+ end).
+
+add_members(_, []) ->
+ ok;
+add_members(Members, [Node | Nodes]) ->
+ Conf = make_ra_conf(Node, [N || {_, N} <- Members]),
+ case ra:start_server(Conf) of
+ ok ->
+ case ra:add_member(Members, {?MODULE, Node}) of
+ {ok, NewMembers, _} ->
+ add_members(NewMembers, Nodes);
+ _ ->
+ add_members(Members, Nodes)
+ end;
+ Error ->
+ rabbit_log:warning("Stream coordinator failed to start on node ~p : ~p",
+ [Node, Error]),
+ add_members(Members, Nodes)
+ end.
+
+remove_members(_, []) ->
+ ok;
+remove_members(Members, [Node | Nodes]) ->
+ case ra:remove_member(Members, {?MODULE, Node}) of
+ {ok, NewMembers, _} ->
+ remove_members(NewMembers, Nodes);
+ _ ->
+ remove_members(Members, Nodes)
+ end.
+
+init_aux(_Name) ->
+ {#{}, undefined}.
+
+%% TODO: ensure the dead writer is restarted as a replica at some point, possibly with an increasing timeout?
+handle_aux(leader, _, maybe_resize_coordinator_cluster, {Monitors, undefined}, LogState, _) ->
+ Pid = maybe_resize_coordinator_cluster(),
+ {no_reply, {Monitors, Pid}, LogState, [{monitor, process, aux, Pid}]};
+handle_aux(leader, _, maybe_resize_coordinator_cluster, AuxState, LogState, _) ->
+ %% Coordinator resizing is still happening; ignore this tick event
+ {no_reply, AuxState, LogState};
+handle_aux(leader, _, {down, Pid, _}, {Monitors, Pid}, LogState, _) ->
+ %% Coordinator resizing has finished
+ {no_reply, {Monitors, undefined}, LogState};
+handle_aux(leader, _, {phase, _, Fun, Args} = Cmd, {Monitors, Coordinator}, LogState, _) ->
+ Pid = erlang:apply(?MODULE, Fun, Args),
+ Actions = [{monitor, process, aux, Pid}],
+ {no_reply, {maps:put(Pid, Cmd, Monitors), Coordinator}, LogState, Actions};
+handle_aux(leader, _, {down, Pid, normal}, {Monitors, Coordinator}, LogState, _) ->
+ {no_reply, {maps:remove(Pid, Monitors), Coordinator}, LogState};
+handle_aux(leader, _, {down, Pid, Reason}, {Monitors0, Coordinator}, LogState, _) ->
+ %% The phase has failed, let's retry it
+ case maps:get(Pid, Monitors0) of
+ {phase, StreamId, phase_start_new_leader, Args} ->
+ rabbit_log:warning("Error while starting new leader for stream queue ~p, "
+ "restarting election: ~p", [StreamId, Reason]),
+ Monitors = maps:remove(Pid, Monitors0),
+ Cmd = {phase, StreamId, phase_check_quorum, Args},
+ {no_reply, {Monitors, Coordinator}, LogState, [{timer, {aux, Cmd}, ?PHASE_RETRY_TIMEOUT}]};
+ {phase, StreamId, Fun, _} = Cmd ->
+ rabbit_log:warning("Error while executing coordinator phase ~p for stream queue ~p ~p",
+ [Fun, StreamId, Reason]),
+ Monitors = maps:remove(Pid, Monitors0),
+ {no_reply, {Monitors, Coordinator}, LogState, [{timer, {aux, Cmd}, ?PHASE_RETRY_TIMEOUT}]}
+ end;
+handle_aux(_, _, _, AuxState, LogState, _) ->
+ {no_reply, AuxState, LogState}.
+
+reply_and_run_pending(From, StreamId, Reply, WrapReply, Actions0, #?MODULE{streams = Streams} = State) ->
+ #{pending_cmds := Pending} = SState0 = maps:get(StreamId, Streams),
+ AuxActions = [{mod_call, ra, pipeline_command, [{?MODULE, node()}, Cmd]}
+ || Cmd <- Pending],
+ SState = maps:put(pending_cmds, [], SState0),
+ Actions = case From of
+ undefined ->
+ AuxActions ++ Actions0;
+ _ ->
+ wrap_reply(From, WrapReply) ++ AuxActions ++ Actions0
+ end,
+ {State#?MODULE{streams = Streams#{StreamId => SState}}, Reply, Actions}.
+
+wrap_reply(From, Reply) ->
+ [{reply, From, {wrap_reply, Reply}}].
+
+add_pending_cmd(From, {CmdName, CmdMap}, #{pending_cmds := Pending0} = StreamState) ->
+ %% When the command is delete_cluster, drop pending 'down' events, which
+ %% would otherwise trigger a leader election or an automatic replica restart
+ Pending = case CmdName of
+ delete_cluster ->
+ lists:filter(fun({down, _, _}) ->
+ false;
+ (_) ->
+ true
+ end, Pending0);
+ _ ->
+ Pending0
+ end,
+ maps:put(pending_cmds, Pending ++ [{CmdName, maps:put(from, From, CmdMap)}],
+ StreamState).
+
+clear_stream_state(StreamState) ->
+ StreamState#{reply_to => undefined,
+ state => running,
+ phase => undefined,
+ phase_args => undefined}.
+
+update_stream_state(From, State, Phase, PhaseArgs, StreamState) ->
+ StreamState#{reply_to => From,
+ state => State,
+ phase => Phase,
+ phase_args => PhaseArgs}.
+
+phase_start_replica(Node, #{name := StreamId} = Conf0,
+ Retries) ->
+ spawn(
+ fun() ->
+ %% If a new leader hasn't been elected yet, this will fail with a badmatch,
+ %% as get_reader_context returns a noproc error. An unhandled failure will
+ %% crash this monitored process, and it will be restarted later.
+ %% TODO However, do we want that crash in the log? We might need to try/catch
+ %% to provide a log message instead, as the failure is 'expected'. We could
+ %% verify first that the leader is alive, but there would still be a
+ %% potential race condition here.
+ try
+ case osiris_replica:start(Node, Conf0) of
+ {ok, Pid} ->
+ ra:pipeline_command({?MODULE, node()},
+ {start_replica_reply, StreamId, Pid});
+ {error, already_present} ->
+ ra:pipeline_command({?MODULE, node()}, {phase_finished, StreamId, ok});
+ {error, {already_started, _}} ->
+ ra:pipeline_command({?MODULE, node()}, {phase_finished, StreamId, ok});
+ {error, Reason} = Error ->
+ rabbit_log:warning("Error while starting replica for ~p : ~p",
+ [maps:get(name, Conf0), Reason]),
+ ra:pipeline_command({?MODULE, node()},
+ {start_replica_failed, StreamId, Node, Retries, Error})
+ end
+ catch _:E->
+ rabbit_log:warning("Error while starting replica for ~p : ~p",
+ [maps:get(name, Conf0), E]),
+ ra:pipeline_command({?MODULE, node()},
+ {start_replica_failed, StreamId, Node, Retries, {error, E}})
+ end
+ end).
+
+phase_delete_replica(Node, Conf) ->
+ spawn(
+ fun() ->
+ ok = osiris_replica:delete(Node, Conf),
+ ra:pipeline_command({?MODULE, node()}, {stream_updated, Conf})
+ end).
+
+phase_stop_replicas(#{replica_nodes := Replicas,
+ name := StreamId} = Conf) ->
+ spawn(
+ fun() ->
+ [try
+ osiris_replica:stop(Node, Conf)
+ catch _:{{nodedown, _}, _} ->
+ %% It could be the old leader that is still down; that's normal.
+ ok
+ end || Node <- Replicas],
+ ra:pipeline_command({?MODULE, node()}, {replicas_stopped, StreamId})
+ end).
+
+phase_start_new_leader(#{name := StreamId, leader_node := Node, leader_pid := LPid} = Conf) ->
+ spawn(fun() ->
+ osiris_replica:stop(Node, Conf),
+ %% If the start fails, the monitor will capture the crash and restart it
+ case osiris_writer:start(Conf) of
+ {ok, Pid} ->
+ ra:pipeline_command({?MODULE, node()},
+ {leader_elected, StreamId, Pid});
+ {error, already_present} ->
+ ra:pipeline_command({?MODULE, node()},
+ {leader_elected, StreamId, LPid});
+ {error, {already_started, Pid}} ->
+ ra:pipeline_command({?MODULE, node()},
+ {leader_elected, StreamId, Pid})
+ end
+ end).
+
+phase_check_quorum(#{name := StreamId,
+ epoch := Epoch,
+ replica_nodes := Nodes} = Conf) ->
+ spawn(fun() ->
+ Offsets = find_replica_offsets(Conf),
+ case is_quorum(length(Nodes) + 1, length(Offsets)) of
+ true ->
+ ra:pipeline_command({?MODULE, node()},
+ {start_leader_election, StreamId, Epoch + 1, Offsets});
+ false ->
+ %% Let's crash this process so the monitor will restart it
+ exit({not_enough_quorum, StreamId})
+ end
+ end).
+
+find_replica_offsets(#{replica_nodes := Nodes,
+ leader_node := Leader} = Conf) ->
+ lists:foldl(
+ fun(Node, Acc) ->
+ try
+ %% osiris_log:overview/1 needs the directory - last item of the list
+ case rpc:call(Node, rabbit, is_running, []) of
+ false ->
+ Acc;
+ true ->
+ case rpc:call(Node, ?MODULE, log_overview, [Conf]) of
+ {badrpc, nodedown} ->
+ Acc;
+ {_Range, Offsets} ->
+ [{Node, select_highest_offset(Offsets)} | Acc]
+ end
+ end
+ catch
+ _:_ ->
+ Acc
+ end
+ end, [], Nodes ++ [Leader]).
+
+select_highest_offset([]) ->
+ empty;
+select_highest_offset(Offsets) ->
+ lists:last(Offsets).
+
+log_overview(Config) ->
+ Dir = osiris_log:directory(Config),
+ osiris_log:overview(Dir).
+
+find_max_offset(Offsets) ->
+ [{Node, _} | _] = lists:sort(fun({_, {Ao, E}}, {_, {Bo, E}}) ->
+ Ao >= Bo;
+ ({_, {_, Ae}}, {_, {_, Be}}) ->
+ Ae >= Be;
+ ({_, empty}, _) ->
+ false;
+ (_, {_, empty}) ->
+ true
+ end, Offsets),
+ Node.
+
+is_quorum(1, 1) ->
+ true;
+is_quorum(NumReplicas, NumAlive) ->
+ NumAlive >= ((NumReplicas div 2) + 1).
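+
+%% For example, with a leader plus four replica nodes is_quorum(5, 3) is true
+%% (the majority is (5 div 2) + 1 = 3), while is_quorum(5, 2) is false.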
+
+phase_repair_mnesia(new, Q) ->
+ spawn(fun() ->
+ Reply = rabbit_amqqueue:internal_declare(Q, false),
+ #{name := StreamId} = amqqueue:get_type_state(Q),
+ ra:pipeline_command({?MODULE, node()}, {phase_finished, StreamId, Reply})
+ end);
+
+phase_repair_mnesia(update, #{reference := QName,
+ leader_pid := LeaderPid,
+ name := StreamId} = Conf) ->
+ Fun = fun (Q) ->
+ amqqueue:set_type_state(amqqueue:set_pid(Q, LeaderPid), Conf)
+ end,
+ spawn(fun() ->
+ case rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ rabbit_amqqueue:update(QName, Fun)
+ end) of
+ not_found ->
+ %% This can happen during recovery
+ [Q] = mnesia:dirty_read(rabbit_durable_queue, QName),
+ rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Fun(Q));
+ _ ->
+ ok
+ end,
+ ra:pipeline_command({?MODULE, node()}, {phase_finished, StreamId, ok})
+ end).
+
+phase_start_cluster(Q0) ->
+ spawn(
+ fun() ->
+ case osiris:start_cluster(amqqueue:get_type_state(Q0)) of
+ {ok, #{leader_pid := Pid} = Conf} ->
+ Q = amqqueue:set_type_state(amqqueue:set_pid(Q0, Pid), Conf),
+ ra:pipeline_command({?MODULE, node()}, {start_cluster_reply, Q});
+ {error, {already_started, _}} ->
+ ra:pipeline_command({?MODULE, node()}, {start_cluster_finished, {error, already_started}})
+ end
+ end).
+
+phase_delete_cluster(#{name := StreamId,
+ reference := QName} = Conf, ActingUser) ->
+ spawn(
+ fun() ->
+ ok = osiris:delete_cluster(Conf),
+ _ = rabbit_amqqueue:internal_delete(QName, ActingUser),
+ ra:pipeline_command({?MODULE, node()}, {delete_cluster_reply, StreamId})
+ end).
+
+format_ra_event(ServerId, Evt) ->
+ {stream_coordinator_event, ServerId, Evt}.
+
+make_ra_conf(Node, Nodes) ->
+ UId = ra:new_uid(ra_lib:to_binary(?MODULE)),
+ Formatter = {?MODULE, format_ra_event, []},
+ Members = [{?MODULE, N} || N <- Nodes],
+ TickTimeout = application:get_env(rabbit, stream_tick_interval,
+ ?TICK_TIMEOUT),
+ #{cluster_name => ?MODULE,
+ id => {?MODULE, Node},
+ uid => UId,
+ friendly_name => atom_to_list(?MODULE),
+ metrics_key => ?MODULE,
+ initial_members => Members,
+ log_init_args => #{uid => UId},
+ tick_timeout => TickTimeout,
+ machine => {module, ?MODULE, #{}},
+ ra_event_formatter => Formatter}.
+
+add_unique(Node, Nodes) ->
+ case lists:member(Node, Nodes) of
+ true ->
+ Nodes;
+ _ ->
+ [Node | Nodes]
+ end.
+
+delete_replica_pid(Node, ReplicaPids) ->
+ lists:partition(fun(P) -> node(P) =/= Node end, ReplicaPids).
+
+apply_leader_locator_strategy(#{leader_locator_strategy := <<"client-local">>} = Conf, _) ->
+ Conf;
+apply_leader_locator_strategy(#{leader_node := Leader,
+ replica_nodes := Replicas0,
+ leader_locator_strategy := <<"random">>,
+ name := StreamId} = Conf, _) ->
+ Replicas = [Leader | Replicas0],
+ ClusterSize = length(Replicas),
+ Hash = erlang:phash2(StreamId),
+ Pos = (Hash rem ClusterSize) + 1,
+ NewLeader = lists:nth(Pos, Replicas),
+ NewReplicas = lists:delete(NewLeader, Replicas),
+ Conf#{leader_node => NewLeader,
+ replica_nodes => NewReplicas};
+apply_leader_locator_strategy(#{leader_node := Leader,
+ replica_nodes := Replicas0,
+ leader_locator_strategy := <<"least-leaders">>} = Conf,
+ Streams) ->
+ Replicas = [Leader | Replicas0],
+ Counters0 = maps:from_list([{R, 0} || R <- Replicas]),
+ Counters = maps:to_list(maps:fold(fun(_Key, #{conf := #{leader_node := L}}, Acc) ->
+ maps:update_with(L, fun(V) -> V + 1 end, 0, Acc)
+ end, Counters0, Streams)),
+ Ordered = lists:sort(fun({_, V1}, {_, V2}) ->
+ V1 =< V2
+ end, Counters),
+ %% We may have introduced nodes that are not in the list of replicas if the
+ %% initial cluster size is smaller than the cluster size. Select the first
+ %% node that is in the list of replicas.
+ NewLeader = select_first_matching_node(Ordered, Replicas),
+ NewReplicas = lists:delete(NewLeader, Replicas),
+ Conf#{leader_node => NewLeader,
+ replica_nodes => NewReplicas}.
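+
+%% Illustration (hypothetical member names): with the <<"random">> strategy
+%% and members [n1, n2, n3], the new leader is
+%%   lists:nth((erlang:phash2(StreamId) rem 3) + 1, [Leader | Replicas])
+%% so the choice is deterministic for a given stream name; the remaining
+%% members become the replica nodes.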
+
+select_first_matching_node([{N, _} | Rest], Replicas) ->
+ case lists:member(N, Replicas) of
+ true -> N;
+ false -> select_first_matching_node(Rest, Replicas)
+ end.
diff --git a/deps/rabbit/src/rabbit_stream_queue.erl b/deps/rabbit/src/rabbit_stream_queue.erl
new file mode 100644
index 0000000000..4e428495b0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_stream_queue.erl
@@ -0,0 +1,734 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream_queue).
+
+-behaviour(rabbit_queue_type).
+
+-export([is_enabled/0,
+ declare/2,
+ delete/4,
+ purge/1,
+ policy_changed/1,
+ recover/2,
+ is_recoverable/1,
+ consume/3,
+ cancel/5,
+ handle_event/2,
+ deliver/2,
+ settle/4,
+ credit/4,
+ dequeue/4,
+ info/2,
+ init/1,
+ close/1,
+ update/2,
+ state_info/1,
+ stat/1,
+ capabilities/0]).
+
+-export([set_retention_policy/3]).
+-export([add_replica/3,
+ delete_replica/3]).
+-export([format_osiris_event/2]).
+-export([update_stream_conf/1]).
+
+-include("rabbit.hrl").
+-include("amqqueue.hrl").
+
+-define(INFO_KEYS, [name, durable, auto_delete, arguments, leader, members, online, state,
+ messages, messages_ready, messages_unacknowledged, committed_offset,
+ policy, operator_policy, effective_policy_definition, type]).
+
+-type appender_seq() :: non_neg_integer().
+
+-record(stream, {name :: rabbit_types:r('queue'),
+ credit :: integer(),
+ max :: non_neg_integer(),
+ start_offset = 0 :: non_neg_integer(),
+ listening_offset = 0 :: non_neg_integer(),
+ log :: undefined | osiris_log:state()}).
+
+-record(stream_client, {name :: term(),
+ leader :: pid(),
+ next_seq = 1 :: non_neg_integer(),
+ correlation = #{} :: #{appender_seq() => term()},
+ soft_limit :: non_neg_integer(),
+ slow = false :: boolean(),
+ readers = #{} :: #{term() => #stream{}}
+ }).
+
+-import(rabbit_queue_type_util, [args_policy_lookup/3]).
+
+-type client() :: #stream_client{}.
+
+-spec is_enabled() -> boolean().
+is_enabled() ->
+ rabbit_feature_flags:is_enabled(stream_queue).
+
+-spec declare(amqqueue:amqqueue(), node()) ->
+ {'new' | 'existing', amqqueue:amqqueue()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+declare(Q0, Node) when ?amqqueue_is_stream(Q0) ->
+ case rabbit_queue_type_util:run_checks(
+ [fun rabbit_queue_type_util:check_auto_delete/1,
+ fun rabbit_queue_type_util:check_exclusive/1,
+ fun rabbit_queue_type_util:check_non_durable/1],
+ Q0) of
+ ok ->
+ start_cluster(Q0, Node);
+ Err ->
+ Err
+ end.
+
+start_cluster(Q0, Node) ->
+ Arguments = amqqueue:get_arguments(Q0),
+ QName = amqqueue:get_name(Q0),
+ Opts = amqqueue:get_options(Q0),
+ ActingUser = maps:get(user, Opts, ?UNKNOWN_USER),
+ Conf0 = make_stream_conf(Node, Q0),
+ case rabbit_stream_coordinator:start_cluster(
+ amqqueue:set_type_state(Q0, Conf0)) of
+ {ok, {error, already_started}, _} ->
+ {protocol_error, precondition_failed, "safe queue name already in use '~s'",
+ [Node]};
+ {ok, {created, Q}, _} ->
+ rabbit_event:notify(queue_created,
+ [{name, QName},
+ {durable, true},
+ {auto_delete, false},
+ {arguments, Arguments},
+ {user_who_performed_action,
+ ActingUser}]),
+ {new, Q};
+ {ok, {error, Error}, _} ->
+ _ = rabbit_amqqueue:internal_delete(QName, ActingUser),
+ {protocol_error, internal_error, "Cannot declare a queue '~s' on node '~s': ~255p",
+ [rabbit_misc:rs(QName), node(), Error]};
+ {ok, {existing, Q}, _} ->
+ {existing, Q};
+ {error, coordinator_unavailable} ->
+ _ = rabbit_amqqueue:internal_delete(QName, ActingUser),
+ {protocol_error, internal_error,
+ "Cannot declare a queue '~s' on node '~s': coordinator unavailable",
+ [rabbit_misc:rs(QName), node()]}
+ end.
+
+-spec delete(amqqueue:amqqueue(), boolean(),
+ boolean(), rabbit_types:username()) ->
+ rabbit_types:ok(non_neg_integer()) |
+ rabbit_types:error(in_use | not_empty).
+delete(Q, _IfUnused, _IfEmpty, ActingUser) ->
+ Name = maps:get(name, amqqueue:get_type_state(Q)),
+ {ok, Reply, _} = rabbit_stream_coordinator:delete_cluster(Name, ActingUser),
+ Reply.
+
+-spec purge(amqqueue:amqqueue()) ->
+ {ok, non_neg_integer()} | {error, term()}.
+purge(_) ->
+ {error, not_supported}.
+
+-spec policy_changed(amqqueue:amqqueue()) -> 'ok'.
+policy_changed(Q) ->
+ Name = maps:get(name, amqqueue:get_type_state(Q)),
+ _ = rabbit_stream_coordinator:policy_changed(Name),
+ ok.
+
+stat(_) ->
+ {ok, 0, 0}.
+
+consume(Q, #{prefetch_count := 0}, _)
+ when ?amqqueue_is_stream(Q) ->
+ {protocol_error, precondition_failed, "consumer prefetch count is not set for '~s'",
+ [rabbit_misc:rs(amqqueue:get_name(Q))]};
+consume(Q, #{no_ack := true}, _)
+ when ?amqqueue_is_stream(Q) ->
+ {protocol_error, not_implemented,
+ "automatic acknowledgement not supported by stream queues ~s",
+ [rabbit_misc:rs(amqqueue:get_name(Q))]};
+consume(Q, #{limiter_active := true}, _State)
+ when ?amqqueue_is_stream(Q) ->
+ {error, global_qos_not_supported_for_queue_type};
+consume(Q, Spec, QState0) when ?amqqueue_is_stream(Q) ->
+ %% Messages should include the offset as a custom header.
+ case check_queue_exists_in_local_node(Q) of
+ ok ->
+ #{no_ack := NoAck,
+ channel_pid := ChPid,
+ prefetch_count := ConsumerPrefetchCount,
+ consumer_tag := ConsumerTag,
+ exclusive_consume := ExclusiveConsume,
+ args := Args,
+ ok_msg := OkMsg} = Spec,
+ QName = amqqueue:get_name(Q),
+ Offset = case rabbit_misc:table_lookup(Args, <<"x-stream-offset">>) of
+ undefined ->
+ next;
+ {_, <<"first">>} ->
+ first;
+ {_, <<"last">>} ->
+ last;
+ {_, <<"next">>} ->
+ next;
+ {timestamp, V} ->
+ {timestamp, V};
+ {_, V} ->
+ V
+ end,
+ rabbit_core_metrics:consumer_created(ChPid, ConsumerTag, ExclusiveConsume,
+ not NoAck, QName,
+ ConsumerPrefetchCount, false,
+ up, Args),
+ %% FIXME: the reply needs to be sent before the stream begins sending.
+ %% Really it should be sent by the stream queue process, as classic
+ %% queues do.
+ maybe_send_reply(ChPid, OkMsg),
+ QState = begin_stream(QState0, Q, ConsumerTag, Offset,
+ ConsumerPrefetchCount),
+ {ok, QState, []};
+ Err ->
+ Err
+ end.
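+
+%% Illustration (hypothetical consumer arguments): the x-stream-offset
+%% consumer argument selects where the consumer attaches: <<"first">>,
+%% <<"last">> or <<"next">>, a numeric offset, or a timestamp table value;
+%% when the argument is absent the offset defaults to next.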
+
+get_local_pid(#{leader_pid := Pid}) when node(Pid) == node() ->
+ Pid;
+get_local_pid(#{replica_pids := ReplicaPids}) ->
+ [Local | _] = lists:filter(fun(Pid) ->
+ node(Pid) == node()
+ end, ReplicaPids),
+ Local.
+
+begin_stream(#stream_client{readers = Readers0} = State,
+ Q, Tag, Offset, Max) ->
+ LocalPid = get_local_pid(amqqueue:get_type_state(Q)),
+ {ok, Seg0} = osiris:init_reader(LocalPid, Offset),
+ NextOffset = osiris_log:next_offset(Seg0) - 1,
+ osiris:register_offset_listener(LocalPid, NextOffset),
+ %% TODO: avoid double calls to the same process
+ StartOffset = case Offset of
+ first -> NextOffset;
+ last -> NextOffset;
+ next -> NextOffset;
+ {timestamp, _} -> NextOffset;
+ _ -> Offset
+ end,
+ Str0 = #stream{name = amqqueue:get_name(Q),
+ credit = Max,
+ start_offset = StartOffset,
+ listening_offset = NextOffset,
+ log = Seg0,
+ max = Max},
+ State#stream_client{readers = Readers0#{Tag => Str0}}.
+
+cancel(_Q, ConsumerTag, OkMsg, ActingUser, #stream_client{readers = Readers0,
+ name = QName} = State) ->
+ Readers = maps:remove(ConsumerTag, Readers0),
+ rabbit_core_metrics:consumer_deleted(self(), ConsumerTag, QName),
+ rabbit_event:notify(consumer_deleted, [{consumer_tag, ConsumerTag},
+ {channel, self()},
+ {queue, QName},
+ {user_who_performed_action, ActingUser}]),
+ maybe_send_reply(self(), OkMsg),
+ {ok, State#stream_client{readers = Readers}}.
+
+credit(CTag, Credit, Drain, #stream_client{readers = Readers0,
+ name = Name,
+ leader = Leader} = State) ->
+ {Readers1, Msgs} = case Readers0 of
+ #{CTag := #stream{credit = Credit0} = Str0} ->
+ Str1 = Str0#stream{credit = Credit0 + Credit},
+ {Str, Msgs0} = stream_entries(Name, Leader, Str1),
+ {Readers0#{CTag => Str}, Msgs0};
+ _ ->
+ {Readers0, []}
+ end,
+ {Readers, Actions} =
+ case Drain of
+ true ->
+ case Readers1 of
+ #{CTag := #stream{credit = Credit1} = Str2} ->
+ {Readers0#{CTag => Str2#stream{credit = 0}}, [{send_drained, {CTag, Credit1}}]};
+ _ ->
+ {Readers1, []}
+ end;
+ false ->
+ {Readers1, []}
+ end,
+ {State#stream_client{readers = Readers}, [{send_credit_reply, length(Msgs)},
+ {deliver, CTag, true, Msgs}] ++ Actions}.
+
+deliver(QSs, #delivery{confirm = Confirm} = Delivery) ->
+ lists:foldl(
+ fun({_Q, stateless}, {Qs, Actions}) ->
+ %% TODO what do we do with stateless?
+ %% QRef = amqqueue:get_pid(Q),
+ %% ok = rabbit_fifo_client:untracked_enqueue(
+ %% [QRef], Delivery#delivery.message),
+ {Qs, Actions};
+ ({Q, S0}, {Qs, Actions}) ->
+ S = deliver(Confirm, Delivery, S0),
+ {[{Q, S} | Qs], Actions}
+ end, {[], []}, QSs).
+
+deliver(_Confirm, #delivery{message = Msg, msg_seq_no = MsgId},
+ #stream_client{name = Name,
+ leader = LeaderPid,
+ next_seq = Seq,
+ correlation = Correlation0,
+ soft_limit = SftLmt,
+ slow = Slow0} = State) ->
+ ok = osiris:write(LeaderPid, Seq, msg_to_iodata(Msg)),
+ Correlation = case MsgId of
+ undefined ->
+ Correlation0;
+ _ when is_number(MsgId) ->
+ Correlation0#{Seq => MsgId}
+ end,
+ Slow = case maps:size(Correlation) >= SftLmt of
+ true when not Slow0 ->
+ credit_flow:block(Name),
+ true;
+ Bool ->
+ Bool
+ end,
+ State#stream_client{next_seq = Seq + 1,
+ correlation = Correlation,
+ slow = Slow}.
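+
+%% Note: each write is correlated with the channel's msg_seq_no so that
+%% osiris_written events can settle the right messages; once the number of
+%% outstanding correlations reaches the soft limit, credit_flow:block/1
+%% applies back-pressure to the publishing channel until enough confirms
+%% come back (see handle_event/2 below).
+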
+-spec dequeue(_, _, _, client()) -> no_return().
+dequeue(_, _, _, #stream_client{name = Name}) ->
+ {protocol_error, not_implemented, "basic.get not supported by stream queues ~s",
+ [rabbit_misc:rs(Name)]}.
+
+handle_event({osiris_written, From, Corrs}, State = #stream_client{correlation = Correlation0,
+ soft_limit = SftLmt,
+ slow = Slow0,
+ name = Name}) ->
+ MsgIds = maps:values(maps:with(Corrs, Correlation0)),
+ Correlation = maps:without(Corrs, Correlation0),
+ Slow = case maps:size(Correlation) < SftLmt of
+ true when Slow0 ->
+ credit_flow:unblock(Name),
+ false;
+ _ ->
+ Slow0
+ end,
+ {ok, State#stream_client{correlation = Correlation,
+ slow = Slow}, [{settled, From, MsgIds}]};
+handle_event({osiris_offset, _From, _Offs}, State = #stream_client{leader = Leader,
+ readers = Readers0,
+ name = Name}) ->
+ %% The offset isn't actually needed, as we use the atomic to read the
+ %% current committed offset.
+ {Readers, TagMsgs} = maps:fold(
+ fun (Tag, Str0, {Acc, TM}) ->
+ {Str, Msgs} = stream_entries(Name, Leader, Str0),
+ %% HACK for now; it would be better to just return, but that is
+ %% tricky with acks and credits that also evaluate the stream.
+ % gen_server:cast(self(), {stream_delivery, Tag, Msgs}),
+ {Acc#{Tag => Str}, [{Tag, Leader, Msgs} | TM]}
+ end, {#{}, []}, Readers0),
+ Ack = true,
+ Deliveries = [{deliver, Tag, Ack, OffsetMsg}
+ || {Tag, _LeaderPid, OffsetMsg} <- TagMsgs],
+ {ok, State#stream_client{readers = Readers}, Deliveries}.
+
+is_recoverable(Q) ->
+ Node = node(),
+ #{replica_nodes := Nodes,
+ leader_node := Leader} = amqqueue:get_type_state(Q),
+ lists:member(Node, Nodes ++ [Leader]).
+
+recover(_VHost, Queues) ->
+ lists:foldl(
+ fun (Q0, {R0, F0}) ->
+ {ok, Q} = recover(Q0),
+ {[Q | R0], F0}
+ end, {[], []}, Queues).
+
+settle(complete, CTag, MsgIds, #stream_client{readers = Readers0,
+ name = Name,
+ leader = Leader} = State) ->
+ Credit = length(MsgIds),
+ {Readers, Msgs} = case Readers0 of
+ #{CTag := #stream{credit = Credit0} = Str0} ->
+ Str1 = Str0#stream{credit = Credit0 + Credit},
+ {Str, Msgs0} = stream_entries(Name, Leader, Str1),
+ {Readers0#{CTag => Str}, Msgs0};
+ _ ->
+ {Readers0, []}
+ end,
+ {State#stream_client{readers = Readers}, [{deliver, CTag, true, Msgs}]};
+settle(_, _, _, #stream_client{name = Name}) ->
+ {protocol_error, not_implemented,
+ "basic.nack and basic.reject not supported by stream queues ~s",
+ [rabbit_misc:rs(Name)]}.
+
+info(Q, all_items) ->
+ info(Q, ?INFO_KEYS);
+info(Q, Items) ->
+ lists:foldr(fun(Item, Acc) ->
+ [{Item, i(Item, Q)} | Acc]
+ end, [], Items).
+
+i(name, Q) when ?is_amqqueue(Q) -> amqqueue:get_name(Q);
+i(durable, Q) when ?is_amqqueue(Q) -> amqqueue:is_durable(Q);
+i(auto_delete, Q) when ?is_amqqueue(Q) -> amqqueue:is_auto_delete(Q);
+i(arguments, Q) when ?is_amqqueue(Q) -> amqqueue:get_arguments(Q);
+i(leader, Q) when ?is_amqqueue(Q) ->
+ #{leader_node := Leader} = amqqueue:get_type_state(Q),
+ Leader;
+i(members, Q) when ?is_amqqueue(Q) ->
+ #{replica_nodes := Nodes} = amqqueue:get_type_state(Q),
+ Nodes;
+i(online, Q) ->
+ #{replica_pids := ReplicaPids,
+ leader_pid := LeaderPid} = amqqueue:get_type_state(Q),
+ [node(P) || P <- ReplicaPids ++ [LeaderPid], rabbit_misc:is_process_alive(P)];
+i(state, Q) when ?is_amqqueue(Q) ->
+ %% TODO: the coordinator should probably answer this.
+ running;
+i(messages, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, _, _, M, _}] ->
+ M;
+ [] ->
+ 0
+ end;
+i(messages_ready, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, MR, _, _, _}] ->
+ MR;
+ [] ->
+ 0
+ end;
+i(messages_unacknowledged, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, _, MU, _, _}] ->
+ MU;
+ [] ->
+ 0
+ end;
+i(committed_offset, Q) ->
+ %% TODO should it be on a metrics table?
+ Data = osiris_counters:overview(),
+ maps:get(committed_offset,
+ maps:get({osiris_writer, amqqueue:get_name(Q)}, Data));
+i(policy, Q) ->
+ case rabbit_policy:name(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(operator_policy, Q) ->
+ case rabbit_policy:name_op(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(effective_policy_definition, Q) ->
+ case rabbit_policy:effective_definition(Q) of
+ undefined -> [];
+ Def -> Def
+ end;
+i(type, _) ->
+ stream;
+i(_, _) ->
+ ''.
+
+init(Q) when ?is_amqqueue(Q) ->
+ Leader = amqqueue:get_pid(Q),
+ {ok, SoftLimit} = application:get_env(rabbit, stream_messages_soft_limit),
+ #stream_client{name = amqqueue:get_name(Q),
+ leader = Leader,
+ soft_limit = SoftLimit}.
+
+close(#stream_client{readers = Readers}) ->
+ _ = maps:map(fun (_, #stream{log = Log}) ->
+ osiris_log:close(Log)
+ end, Readers),
+ ok.
+
+update(_, State) ->
+ State.
+
+state_info(_) ->
+ #{}.
+
+set_retention_policy(Name, VHost, Policy) ->
+ case rabbit_amqqueue:check_max_age(Policy) of
+ {error, _} = E ->
+ E;
+ MaxAge ->
+ QName = rabbit_misc:r(VHost, queue, Name),
+ Fun = fun(Q) ->
+ Conf = amqqueue:get_type_state(Q),
+ amqqueue:set_type_state(Q, Conf#{max_age => MaxAge})
+ end,
+ case rabbit_misc:execute_mnesia_transaction(
+ fun() -> rabbit_amqqueue:update(QName, Fun) end) of
+ not_found ->
+ {error, not_found};
+ _ ->
+ ok
+ end
+ end.
+
+add_replica(VHost, Name, Node) ->
+ QName = rabbit_misc:r(VHost, queue, Name),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ {error, quorum_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_stream(Q) ->
+ case lists:member(Node, rabbit_mnesia:cluster_nodes(running)) of
+ false ->
+ {error, node_not_running};
+ true ->
+ #{name := StreamId} = amqqueue:get_type_state(Q),
+ {ok, Reply, _} = rabbit_stream_coordinator:add_replica(StreamId, Node),
+ Reply
+ end;
+ E ->
+ E
+ end.
+
+delete_replica(VHost, Name, Node) ->
+ QName = rabbit_misc:r(VHost, queue, Name),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ {error, quorum_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_stream(Q) ->
+ case lists:member(Node, rabbit_mnesia:cluster_nodes(running)) of
+ false ->
+ {error, node_not_running};
+ true ->
+ #{name := StreamId} = amqqueue:get_type_state(Q),
+ {ok, Reply, _} = rabbit_stream_coordinator:delete_replica(StreamId, Node),
+ Reply
+ end;
+ E ->
+ E
+ end.
+
+make_stream_conf(Node, Q) ->
+ QName = amqqueue:get_name(Q),
+ Name = queue_name(QName),
+ %% MaxLength = args_policy_lookup(<<"max-length">>, fun min/2, Q),
+ MaxBytes = args_policy_lookup(<<"max-length-bytes">>, fun min/2, Q),
+ MaxAge = max_age(args_policy_lookup(<<"max-age">>, fun max_age/2, Q)),
+ MaxSegmentSize = args_policy_lookup(<<"max-segment-size">>, fun min/2, Q),
+ LeaderLocator = queue_leader_locator(args_policy_lookup(<<"queue-leader-locator">>,
+ fun res_arg/2, Q)),
+ InitialClusterSize = initial_cluster_size(args_policy_lookup(<<"initial-cluster-size">>,
+ fun res_arg/2, Q)),
+ Replicas0 = rabbit_mnesia:cluster_nodes(all) -- [Node],
+ Replicas = select_stream_nodes(InitialClusterSize - 1, Replicas0),
+ Formatter = {?MODULE, format_osiris_event, [QName]},
+ Retention = lists:filter(fun({_, R}) ->
+ R =/= undefined
+ end, [{max_bytes, MaxBytes},
+ {max_age, MaxAge}]),
+ add_if_defined(max_segment_size, MaxSegmentSize, #{reference => QName,
+ name => Name,
+ retention => Retention,
+ leader_locator_strategy => LeaderLocator,
+ leader_node => Node,
+ replica_nodes => Replicas,
+ event_formatter => Formatter,
+ epoch => 1}).
+
+select_stream_nodes(Size, All) when length(All) =< Size ->
+ All;
+select_stream_nodes(Size, All) ->
+ Node = node(),
+ case lists:member(Node, All) of
+ true ->
+ select_stream_nodes(Size - 1, lists:delete(Node, All), [Node]);
+ false ->
+ select_stream_nodes(Size, All, [])
+ end.
+
+select_stream_nodes(0, _, Selected) ->
+ Selected;
+select_stream_nodes(Size, Rest, Selected) ->
+ S = lists:nth(rand:uniform(length(Rest)), Rest),
+ select_stream_nodes(Size - 1, lists:delete(S, Rest), [S | Selected]).
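+
+%% The selection keeps the local node whenever it is among the candidates and
+%% fills the remaining slots with uniformly random picks from the rest, e.g.
+%% with Size = 2 and All = [node(), n2, n3] the result is the local node plus
+%% one of n2 or n3.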
+
+update_stream_conf(#{reference := QName} = Conf) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ MaxBytes = args_policy_lookup(<<"max-length-bytes">>, fun min/2, Q),
+ MaxAge = max_age(args_policy_lookup(<<"max-age">>, fun max_age/2, Q)),
+ MaxSegmentSize = args_policy_lookup(<<"max-segment-size">>, fun min/2, Q),
+ Retention = lists:filter(fun({_, R}) ->
+ R =/= undefined
+ end, [{max_bytes, MaxBytes},
+ {max_age, MaxAge}]),
+ add_if_defined(max_segment_size, MaxSegmentSize, Conf#{retention => Retention});
+ _ ->
+ Conf
+ end.
+
+add_if_defined(_, undefined, Map) ->
+ Map;
+add_if_defined(Key, Value, Map) ->
+ maps:put(Key, Value, Map).
+
+format_osiris_event(Evt, QRef) ->
+ {'$gen_cast', {queue_event, QRef, Evt}}.
+
+max_age(undefined) ->
+ undefined;
+max_age(Bin) when is_binary(Bin) ->
+ rabbit_amqqueue:check_max_age(Bin);
+max_age(Age) ->
+ Age.
+
+max_age(Age1, Age2) ->
+ min(rabbit_amqqueue:check_max_age(Age1), rabbit_amqqueue:check_max_age(Age2)).
+
+queue_leader_locator(undefined) -> <<"client-local">>;
+queue_leader_locator(Val) -> Val.
+
+initial_cluster_size(undefined) ->
+ length(rabbit_mnesia:cluster_nodes(running));
+initial_cluster_size(Val) ->
+ Val.
+
+res_arg(PolVal, undefined) -> PolVal;
+res_arg(_, ArgVal) -> ArgVal.
+
+queue_name(#resource{virtual_host = VHost, name = Name}) ->
+ Timestamp = erlang:integer_to_binary(erlang:system_time()),
+ osiris_util:to_base64uri(erlang:binary_to_list(<<VHost/binary, "_", Name/binary, "_",
+ Timestamp/binary>>)).
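+
+%% The generated osiris stream name is "<vhost>_<queue>_<timestamp>" run
+%% through osiris_util:to_base64uri/1, so each declaration yields a unique
+%% name even when a queue with the same name is deleted and re-declared.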
+
+recover(Q) ->
+ rabbit_stream_coordinator:recover(),
+ {ok, Q}.
+
+check_queue_exists_in_local_node(Q) ->
+ Conf = amqqueue:get_type_state(Q),
+ AllNodes = [maps:get(leader_node, Conf) | maps:get(replica_nodes, Conf)],
+ case lists:member(node(), AllNodes) of
+ true ->
+ ok;
+ false ->
+ {protocol_error, precondition_failed,
+           "queue '~s' does not have a replica on the local node",
+ [rabbit_misc:rs(amqqueue:get_name(Q))]}
+ end.
+
+maybe_send_reply(_ChPid, undefined) -> ok;
+maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg).
+
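+%% Credit-driven read loop: keep reading parsed chunks from the osiris log
+%% until the consumer's credit is exhausted or the end of the stream is
+%% reached, in which case an offset listener is registered with the leader
+%% so the reader is woken up once new chunks become readable.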
+stream_entries(Name, Id, Str) ->
+ stream_entries(Name, Id, Str, []).
+
+stream_entries(Name, LeaderPid,
+ #stream{name = QName,
+ credit = Credit,
+ start_offset = StartOffs,
+ listening_offset = LOffs,
+ log = Seg0} = Str0, MsgIn)
+ when Credit > 0 ->
+ case osiris_log:read_chunk_parsed(Seg0) of
+ {end_of_stream, Seg} ->
+ NextOffset = osiris_log:next_offset(Seg),
+ case NextOffset > LOffs of
+ true ->
+ osiris:register_offset_listener(LeaderPid, NextOffset),
+ {Str0#stream{log = Seg,
+ listening_offset = NextOffset}, MsgIn};
+ false ->
+ {Str0#stream{log = Seg}, MsgIn}
+ end;
+ {Records, Seg} ->
+ Msgs = [begin
+ Msg0 = binary_to_msg(QName, B),
+ Msg = rabbit_basic:add_header(<<"x-stream-offset">>,
+ long, O, Msg0),
+ {Name, LeaderPid, O, false, Msg}
+ end || {O, B} <- Records,
+ O >= StartOffs],
+
+ NumMsgs = length(Msgs),
+
+ Str = Str0#stream{credit = Credit - NumMsgs,
+ log = Seg},
+ case Str#stream.credit < 1 of
+ true ->
+ %% we are done here
+ {Str, MsgIn ++ Msgs};
+ false ->
+                    %% if credit remains it means some of the records read
+                    %% did not produce deliverable messages (e.g. offsets
+                    %% below the start offset), so recurse and try to read more
+ stream_entries(Name, LeaderPid, Str, MsgIn ++ Msgs)
+ end
+ end;
+stream_entries(_Name, _Id, Str, Msgs) ->
+ {Str, Msgs}.
+
+binary_to_msg(#resource{virtual_host = VHost,
+ kind = queue,
+ name = QName}, Data) ->
+ R0 = rabbit_msg_record:init(Data),
+ %% if the message annotation isn't present the data most likely came from
+ %% the rabbitmq-stream plugin so we'll choose defaults that simulate use
+ %% of the direct exchange
+ {utf8, Exchange} = rabbit_msg_record:message_annotation(<<"x-exchange">>,
+ R0, {utf8, <<>>}),
+ {utf8, RoutingKey} = rabbit_msg_record:message_annotation(<<"x-routing-key">>,
+ R0, {utf8, QName}),
+ {Props, Payload} = rabbit_msg_record:to_amqp091(R0),
+ XName = #resource{kind = exchange,
+ virtual_host = VHost,
+ name = Exchange},
+ Content = #content{class_id = 60,
+ properties = Props,
+ properties_bin = none,
+ payload_fragments_rev = [Payload]},
+ {ok, Msg} = rabbit_basic:message(XName, RoutingKey, Content),
+ Msg.
+
+
+msg_to_iodata(#basic_message{exchange_name = #resource{name = Exchange},
+ routing_keys = [RKey | _],
+ content = Content}) ->
+ #content{properties = Props,
+ payload_fragments_rev = Payload} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ R0 = rabbit_msg_record:from_amqp091(Props, lists:reverse(Payload)),
+ %% TODO durable?
+ R = rabbit_msg_record:add_message_annotations(
+ #{<<"x-exchange">> => {utf8, Exchange},
+ <<"x-routing-key">> => {utf8, RKey}}, R0),
+ rabbit_msg_record:to_iodata(R).
+
+capabilities() ->
+ #{policies => [<<"max-length-bytes">>, <<"max-age">>, <<"max-segment-size">>,
+ <<"queue-leader-locator">>, <<"initial-cluster-size">>],
+ queue_arguments => [<<"x-dead-letter-exchange">>, <<"x-dead-letter-routing-key">>,
+ <<"x-max-length">>, <<"x-max-length-bytes">>,
+ <<"x-single-active-consumer">>, <<"x-queue-type">>,
+ <<"x-max-age">>, <<"x-max-segment-size">>,
+ <<"x-initial-cluster-size">>, <<"x-queue-leader-locator">>],
+ consumer_arguments => [<<"x-stream-offset">>],
+ server_named => false}.
diff --git a/deps/rabbit/src/rabbit_sup.erl b/deps/rabbit/src/rabbit_sup.erl
new file mode 100644
index 0000000000..06643b155d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_sup.erl
@@ -0,0 +1,109 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0, start_child/1, start_child/2, start_child/3, start_child/4,
+ start_supervisor_child/1, start_supervisor_child/2,
+ start_supervisor_child/3,
+ start_restartable_child/1, start_restartable_child/2,
+ start_delayed_restartable_child/1, start_delayed_restartable_child/2,
+ stop_child/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() -> supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+-spec start_child(atom()) -> 'ok'.
+
+start_child(Mod) -> start_child(Mod, []).
+
+-spec start_child(atom(), [any()]) -> 'ok'.
+
+start_child(Mod, Args) -> start_child(Mod, Mod, Args).
+
+-spec start_child(atom(), atom(), [any()]) -> 'ok'.
+
+start_child(ChildId, Mod, Args) ->
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {ChildId, {Mod, start_link, Args},
+ transient, ?WORKER_WAIT, worker, [Mod]})).
+
+-spec start_child(atom(), atom(), atom(), [any()]) -> 'ok'.
+
+start_child(ChildId, Mod, Fun, Args) ->
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {ChildId, {Mod, Fun, Args},
+ transient, ?WORKER_WAIT, worker, [Mod]})).
+
+-spec start_supervisor_child(atom()) -> 'ok'.
+
+start_supervisor_child(Mod) -> start_supervisor_child(Mod, []).
+
+-spec start_supervisor_child(atom(), [any()]) -> 'ok'.
+
+start_supervisor_child(Mod, Args) -> start_supervisor_child(Mod, Mod, Args).
+
+-spec start_supervisor_child(atom(), atom(), [any()]) -> 'ok'.
+
+start_supervisor_child(ChildId, Mod, Args) ->
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {ChildId, {Mod, start_link, Args},
+ transient, infinity, supervisor, [Mod]})).
+
+-spec start_restartable_child(atom()) -> 'ok'.
+
+start_restartable_child(M) -> start_restartable_child(M, [], false).
+
+-spec start_restartable_child(atom(), [any()]) -> 'ok'.
+
+start_restartable_child(M, A) -> start_restartable_child(M, A, false).
+
+-spec start_delayed_restartable_child(atom()) -> 'ok'.
+
+start_delayed_restartable_child(M) -> start_restartable_child(M, [], true).
+
+-spec start_delayed_restartable_child(atom(), [any()]) -> 'ok'.
+
+start_delayed_restartable_child(M, A) -> start_restartable_child(M, A, true).
+
+start_restartable_child(Mod, Args, Delay) ->
+ Name = list_to_atom(atom_to_list(Mod) ++ "_sup"),
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {Name, {rabbit_restartable_sup, start_link,
+ [Name, {Mod, start_link, Args}, Delay]},
+ transient, infinity, supervisor, [rabbit_restartable_sup]})).
+
+-spec stop_child(atom()) -> rabbit_types:ok_or_error(any()).
+
+stop_child(ChildId) ->
+ case supervisor:terminate_child(?SERVER, ChildId) of
+ ok -> supervisor:delete_child(?SERVER, ChildId);
+ E -> E
+ end.
+
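+%% With a restart intensity of 0 in 1 second, any abnormal child exit exceeds
+%% the limit and terminates rabbit_sup itself; children that need to survive
+%% crashes are wrapped in their own rabbit_restartable_sup via
+%% start_restartable_child/1,2 above.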
+init([]) -> {ok, {{one_for_all, 0, 1}, []}}.
+
+
+%%----------------------------------------------------------------------------
+
+child_reply({ok, _}) -> ok;
+child_reply(X) -> X.
diff --git a/deps/rabbit/src/rabbit_sysmon_handler.erl b/deps/rabbit/src/rabbit_sysmon_handler.erl
new file mode 100644
index 0000000000..8f7298ed6e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_sysmon_handler.erl
@@ -0,0 +1,235 @@
+%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% https://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+
+%% @doc A custom event handler for the `sysmon_handler' application's
+%% `system_monitor' event manager.
+%%
+%% This module attempts to discover more information about a process
+%% that generates a system_monitor event.
+
+-module(rabbit_sysmon_handler).
+
+-behaviour(gen_event).
+
+%% API
+-export([add_handler/0]).
+
+%% gen_event callbacks
+-export([init/1, handle_event/2, handle_call/2,
+ handle_info/2, terminate/2, code_change/3]).
+
+-record(state, {timer_ref :: reference() | undefined}).
+
+-define(INACTIVITY_TIMEOUT, 5000).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+add_handler() ->
+ %% Vulnerable to race conditions (installing handler multiple
+ %% times), but risk is zero in the common OTP app startup case.
+ case lists:member(?MODULE, gen_event:which_handlers(sysmon_handler)) of
+ true ->
+ ok;
+ false ->
+ sysmon_handler_filter:add_custom_handler(?MODULE, [])
+ end.
+
+%%%===================================================================
+%%% gen_event callbacks
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever a new event handler is added to an event manager,
+%% this function is called to initialize the event handler.
+%%
+%% @spec init(Args) -> {ok, State} | {ok, State, hibernate}
+%% @end
+%%--------------------------------------------------------------------
+init([]) ->
+ {ok, #state{}, hibernate}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event manager receives an event sent using
+%% gen_event:notify/2 or gen_event:sync_notify/2, this function is
+%% called for each installed event handler to handle the event.
+%%
+%% @spec handle_event(Event, State) ->
+%% {ok, State} |
+%% {swap_handler, Args1, State1, Mod2, Args2} |
+%% remove_handler
+%% @end
+%%--------------------------------------------------------------------
+handle_event({monitor, Pid, Type, _Info},
+ State=#state{timer_ref=TimerRef}) when Pid == self() ->
+ %% Reset the inactivity timeout
+ NewTimerRef = reset_timer(TimerRef),
+ maybe_collect_garbage(Type),
+ {ok, State#state{timer_ref=NewTimerRef}};
+handle_event({monitor, PidOrPort, Type, Info}, State=#state{timer_ref=TimerRef}) ->
+ %% Reset the inactivity timeout
+ NewTimerRef = reset_timer(TimerRef),
+ {Fmt, Args} = format_pretty_proc_or_port_info(PidOrPort),
+ rabbit_log:warning("~p ~w ~w " ++ Fmt ++ " ~w", [?MODULE, Type, PidOrPort] ++ Args ++ [Info]),
+ {ok, State#state{timer_ref=NewTimerRef}};
+handle_event({suppressed, Type, Info}, State=#state{timer_ref=TimerRef}) ->
+ %% Reset the inactivity timeout
+ NewTimerRef = reset_timer(TimerRef),
+ rabbit_log:debug("~p encountered a suppressed event of type ~w: ~w", [?MODULE, Type, Info]),
+ {ok, State#state{timer_ref=NewTimerRef}};
+handle_event(Event, State=#state{timer_ref=TimerRef}) ->
+ NewTimerRef = reset_timer(TimerRef),
+ rabbit_log:warning("~p unhandled event: ~p", [?MODULE, Event]),
+ {ok, State#state{timer_ref=NewTimerRef}}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event manager receives a request sent using
+%% gen_event:call/3,4, this function is called for the specified
+%% event handler to handle the request.
+%%
+%% @spec handle_call(Request, State) ->
+%% {ok, Reply, State} |
+%% {swap_handler, Reply, Args1, State1, Mod2, Args2} |
+%% {remove_handler, Reply}
+%% @end
+%%--------------------------------------------------------------------
+handle_call(_Call, State) ->
+ Reply = not_supported,
+ {ok, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% This function is called for each installed event handler when
+%% an event manager receives any other message than an event or a
+%% synchronous request (or a system message).
+%%
+%% @spec handle_info(Info, State) ->
+%% {ok, State} |
+%% {swap_handler, Args1, State1, Mod2, Args2} |
+%% remove_handler
+%% @end
+%%--------------------------------------------------------------------
+handle_info(inactivity_timeout, State) ->
+ %% No events have arrived for the timeout period
+ %% so hibernate to free up resources.
+ {ok, State, hibernate};
+handle_info(Info, State) ->
+ rabbit_log:info("handle_info got ~p", [Info]),
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event handler is deleted from an event manager, this
+%% function is called. It should be the opposite of Module:init/1 and
+%% do any necessary cleaning up.
+%%
+%% @spec terminate(Reason, State) -> void()
+%% @end
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Convert process state when code is changed
+%%
+%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
+%% @end
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+format_pretty_proc_or_port_info(PidOrPort) ->
+ try
+ case get_pretty_proc_or_port_info(PidOrPort) of
+ undefined ->
+ {"", []};
+ Res ->
+ Res
+ end
+ catch C:E:S ->
+ {"Pid ~w, ~W ~W at ~w\n",
+ [PidOrPort, C, 20, E, 20, S]}
+ end.
+
+get_pretty_proc_or_port_info(Pid) when is_pid(Pid) ->
+ Infos = [registered_name, initial_call, current_function, message_queue_len],
+ case process_info(Pid, Infos) of
+ undefined ->
+ undefined;
+ [] ->
+ undefined;
+ [{registered_name, RN0}, ICT1, {_, CF}, {_, MQL}] ->
+ ICT = case proc_lib:translate_initial_call(Pid) of
+ {proc_lib, init_p, 5} -> % not by proc_lib, see docs
+ ICT1;
+ ICT2 ->
+ {initial_call, ICT2}
+ end,
+ RNL = if RN0 == [] -> [];
+ true -> [{name, RN0}]
+ end,
+ {"~w", [RNL ++ [ICT, CF, {message_queue_len, MQL}]]}
+ end;
+get_pretty_proc_or_port_info(Port) when is_port(Port) ->
+ PortInfo = erlang:port_info(Port),
+ {value, {name, Name}, PortInfo2} = lists:keytake(name, 1, PortInfo),
+ QueueSize = [erlang:port_info(Port, queue_size)],
+ Connected = case proplists:get_value(connected, PortInfo2) of
+ undefined ->
+ [];
+ ConnectedPid ->
+ case proc_lib:translate_initial_call(ConnectedPid) of
+ {proc_lib, init_p, 5} -> % not by proc_lib, see docs
+ [];
+ ICT ->
+ [{initial_call, ICT}]
+ end
+ end,
+ {"name ~s ~w", [Name, lists:append([PortInfo2, QueueSize, Connected])]}.
+
+
+%% @doc If the message type is a large heap warning and the source is our
+%% own process (the caller checks this), collect garbage to avoid the
+%% death spiral.
+-spec maybe_collect_garbage(atom()) -> ok.
+maybe_collect_garbage(large_heap) ->
+ erlang:garbage_collect(),
+ ok;
+maybe_collect_garbage(_) ->
+ ok.
+
+-spec reset_timer(undefined | reference()) -> reference().
+reset_timer(undefined) ->
+ erlang:send_after(?INACTIVITY_TIMEOUT, self(), inactivity_timeout);
+reset_timer(TimerRef) ->
+ _ = erlang:cancel_timer(TimerRef),
+ reset_timer(undefined).
diff --git a/deps/rabbit/src/rabbit_sysmon_minder.erl b/deps/rabbit/src/rabbit_sysmon_minder.erl
new file mode 100644
index 0000000000..a0402e5ebe
--- /dev/null
+++ b/deps/rabbit/src/rabbit_sysmon_minder.erl
@@ -0,0 +1,156 @@
+%% -------------------------------------------------------------------
+%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% https://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+
+-module(rabbit_sysmon_minder).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Starts the server
+%%
+%% @spec start_link() -> {ok, Pid} | ignore | {error, Error}
+%% @end
+%%--------------------------------------------------------------------
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Initializes the server
+%%
+%% @spec init(Args) -> {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%% @end
+%%--------------------------------------------------------------------
+init([]) ->
+ %% Add our system_monitor event handler. We do that here because
+ %% we have a process at our disposal (i.e. ourself) to receive the
+ %% notification in the very unlikely event that the
+ %% sysmon_handler has crashed and been removed from the
+ %% sysmon_handler gen_event server. (If we had a supervisor
+ %% or app-starting process add the handler, then if the handler
+ %% crashes, nobody will act on the crash notification.)
+ rabbit_sysmon_handler:add_handler(),
+ {ok, #state{}}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Handling call messages
+%%
+%% @spec handle_call(Request, From, State) ->
+%% {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} |
+%% {stop, Reason, State}
+%% @end
+%%--------------------------------------------------------------------
+handle_call(_Request, _From, State) ->
+ Reply = ok,
+ {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Handling cast messages
+%%
+%% @spec handle_cast(Msg, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% @end
+%%--------------------------------------------------------------------
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Handling all non call/cast messages
+%%
+%% @spec handle_info(Info, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% @end
+%%--------------------------------------------------------------------
+handle_info({gen_event_EXIT, rabbit_sysmon_handler, _}, State) ->
+ %% SASL will create an error message, no need for us to duplicate it.
+ %%
+ %% Our handler should never crash, but it did indeed crash. If
+ %% there's a pathological condition somewhere that's generating
+    %% lots of unforeseen things that crash core's custom handler, we
+ %% could make things worse by jumping back into the exploding
+ %% volcano. Wait a little bit before jumping back. Besides, the
+ %% system_monitor data is nice but is not critical: there is no
+ %% need to make things worse if things are indeed bad, and if we
+ %% miss a few seconds of system_monitor events, the world will not
+ %% end.
+ timer:sleep(2*1000),
+ rabbit_sysmon_handler:add_handler(),
+ {noreply, State};
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% This function is called by a gen_server when it is about to
+%% terminate. It should be the opposite of Module:init/1 and do any
+%% necessary cleaning up. When it returns, the gen_server terminates
+%% with Reason. The return value is ignored.
+%%
+%% @spec terminate(Reason, State) -> void()
+%% @end
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Convert process state when code is changed
+%%
+%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
+%% @end
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/rabbit_table.erl b/deps/rabbit/src/rabbit_table.erl
new file mode 100644
index 0000000000..77534763d0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_table.erl
@@ -0,0 +1,416 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_table).
+
+-export([
+ create/0, create/2, ensure_local_copies/1, ensure_table_copy/2,
+ wait_for_replicated/1, wait/1, wait/2,
+ force_load/0, is_present/0, is_empty/0, needs_default_data/0,
+ check_schema_integrity/1, clear_ram_only_tables/0, retry_timeout/0,
+ wait_for_replicated/0, exists/1]).
+
+%% for testing purposes
+-export([definitions/0]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-type retry() :: boolean().
+-type mnesia_table() :: atom().
+
+%%----------------------------------------------------------------------------
+%% Main interface
+%%----------------------------------------------------------------------------
+
+-spec create() -> 'ok'.
+
+create() ->
+ lists:foreach(
+ fun ({Table, Def}) -> create(Table, Def) end,
+ definitions()),
+ ensure_secondary_indexes(),
+ ok.
+
+-spec create(mnesia_table(), list()) -> rabbit_types:ok_or_error(any()).
+
+create(TableName, TableDefinition) ->
+ TableDefinition1 = proplists:delete(match, TableDefinition),
+ rabbit_log:debug("Will create a schema database table '~s'", [TableName]),
+ case mnesia:create_table(TableName, TableDefinition1) of
+ {atomic, ok} -> ok;
+ {aborted,{already_exists, TableName}} -> ok;
+ {aborted, {already_exists, TableName, _}} -> ok;
+ {aborted, Reason} ->
+ throw({error, {table_creation_failed, TableName, TableDefinition1, Reason}})
+ end.
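+
+%% Note: the 'match' entry in each table definition is only consumed by
+%% check_schema_integrity/1 (via check_content/2 below); it is stripped here
+%% because it is not a valid mnesia:create_table/2 option.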
+
+-spec exists(mnesia_table()) -> boolean().
+exists(Table) ->
+ lists:member(Table, mnesia:system_info(tables)).
+
+%% Sets up secondary indexes in a blank node database.
+ensure_secondary_indexes() ->
+ ensure_secondary_index(rabbit_queue, vhost),
+ ok.
+
+ensure_secondary_index(Table, Field) ->
+ case mnesia:add_table_index(Table, Field) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, Table, _}} -> ok
+ end.
+
+-spec ensure_table_copy(mnesia_table(), node()) -> ok | {error, any()}.
+ensure_table_copy(TableName, Node) ->
+ rabbit_log:debug("Will add a local schema database copy for table '~s'", [TableName]),
+ case mnesia:add_table_copy(TableName, Node, disc_copies) of
+ {atomic, ok} -> ok;
+ {aborted,{already_exists, TableName}} -> ok;
+ {aborted, {already_exists, TableName, _}} -> ok;
+ {aborted, Reason} -> {error, Reason}
+ end.
+
+%% This arity only exists for backwards compatibility with certain
+%% plugins. See https://github.com/rabbitmq/rabbitmq-clusterer/issues/19.
+
+-spec wait_for_replicated() -> 'ok'.
+
+wait_for_replicated() ->
+ wait_for_replicated(false).
+
+-spec wait_for_replicated(retry()) -> 'ok'.
+
+wait_for_replicated(Retry) ->
+ wait([Tab || {Tab, TabDef} <- definitions(),
+ not lists:member({local_content, true}, TabDef)], Retry).
+
+-spec wait([atom()]) -> 'ok'.
+
+wait(TableNames) ->
+ wait(TableNames, _Retry = false).
+
+wait(TableNames, Retry) ->
+ {Timeout, Retries} = retry_timeout(Retry),
+ wait(TableNames, Timeout, Retries).
+
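+%% Retries mnesia:wait_for_tables/2 up to Retries times with the given
+%% timeout, logging a warning between attempts and throwing the last error
+%% once the final attempt fails.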
+wait(TableNames, Timeout, Retries) ->
+ %% We might be in ctl here for offline ops, in which case we can't
+ %% get_env() for the rabbit app.
+ rabbit_log:info("Waiting for Mnesia tables for ~p ms, ~p retries left~n",
+ [Timeout, Retries - 1]),
+ Result = case mnesia:wait_for_tables(TableNames, Timeout) of
+ ok ->
+ ok;
+ {timeout, BadTabs} ->
+ AllNodes = rabbit_mnesia:cluster_nodes(all),
+ {error, {timeout_waiting_for_tables, AllNodes, BadTabs}};
+ {error, Reason} ->
+ AllNodes = rabbit_mnesia:cluster_nodes(all),
+ {error, {failed_waiting_for_tables, AllNodes, Reason}}
+ end,
+ case {Retries, Result} of
+ {_, ok} ->
+ rabbit_log:info("Successfully synced tables from a peer"),
+ ok;
+ {1, {error, _} = Error} ->
+ throw(Error);
+ {_, {error, Error}} ->
+ rabbit_log:warning("Error while waiting for Mnesia tables: ~p~n", [Error]),
+ wait(TableNames, Timeout, Retries - 1)
+ end.
+
+retry_timeout(_Retry = false) ->
+ {retry_timeout(), 1};
+retry_timeout(_Retry = true) ->
+ Retries = case application:get_env(rabbit, mnesia_table_loading_retry_limit) of
+ {ok, T} -> T;
+ undefined -> 10
+ end,
+ {retry_timeout(), Retries}.
+
+-spec retry_timeout() -> non_neg_integer() | infinity.
+
+retry_timeout() ->
+ case application:get_env(rabbit, mnesia_table_loading_retry_timeout) of
+ {ok, T} -> T;
+ undefined -> 30000
+ end.
+
+-spec force_load() -> 'ok'.
+
+force_load() -> [mnesia:force_load_table(T) || T <- names()], ok.
+
+-spec is_present() -> boolean().
+
+is_present() -> names() -- mnesia:system_info(tables) =:= [].
+
+-spec is_empty() -> boolean().
+
+is_empty() -> is_empty(names()).
+
+-spec needs_default_data() -> boolean().
+
+needs_default_data() -> is_empty([rabbit_user, rabbit_user_permission,
+ rabbit_vhost]).
+
+is_empty(Names) ->
+ lists:all(fun (Tab) -> mnesia:dirty_first(Tab) == '$end_of_table' end,
+ Names).
+
+-spec check_schema_integrity(retry()) -> rabbit_types:ok_or_error(any()).
+
+check_schema_integrity(Retry) ->
+ Tables = mnesia:system_info(tables),
+ case check(fun (Tab, TabDef) ->
+ case lists:member(Tab, Tables) of
+ false -> {error, {table_missing, Tab}};
+ true -> check_attributes(Tab, TabDef)
+ end
+ end) of
+ ok -> wait(names(), Retry),
+ check(fun check_content/2);
+ Other -> Other
+ end.
+
+-spec clear_ram_only_tables() -> 'ok'.
+
+clear_ram_only_tables() ->
+ Node = node(),
+ lists:foreach(
+ fun (TabName) ->
+ case lists:member(Node, mnesia:table_info(TabName, ram_copies)) of
+ true -> {atomic, ok} = mnesia:clear_table(TabName);
+ false -> ok
+ end
+ end, names()),
+ ok.
+
+%% The order in which we handle the schema relative to the other tables is
+%% important: when moving to RAM the schema copy must be converted last,
+%% since Mnesia loudly complains otherwise, but when moving to disc the
+%% schema has to be moved first.
+
+-spec ensure_local_copies('disc' | 'ram') -> 'ok'.
+
+ensure_local_copies(disc) ->
+ create_local_copy(schema, disc_copies),
+ create_local_copies(disc);
+ensure_local_copies(ram) ->
+ create_local_copies(ram),
+ create_local_copy(schema, ram_copies).
+
+%%--------------------------------------------------------------------
+%% Internal helpers
+%%--------------------------------------------------------------------
+
+create_local_copies(Type) ->
+ lists:foreach(
+ fun ({Tab, TabDef}) ->
+ HasDiscCopies = has_copy_type(TabDef, disc_copies),
+ HasDiscOnlyCopies = has_copy_type(TabDef, disc_only_copies),
+ LocalTab = proplists:get_bool(local_content, TabDef),
+ StorageType =
+ if
+ Type =:= disc orelse LocalTab ->
+ if
+ HasDiscCopies -> disc_copies;
+ HasDiscOnlyCopies -> disc_only_copies;
+ true -> ram_copies
+ end;
+ Type =:= ram ->
+ ram_copies
+ end,
+ ok = create_local_copy(Tab, StorageType)
+ end, definitions(Type)),
+ ok.
+
+create_local_copy(Tab, Type) ->
+ StorageType = mnesia:table_info(Tab, storage_type),
+ {atomic, ok} =
+ if
+ StorageType == unknown ->
+ mnesia:add_table_copy(Tab, node(), Type);
+ StorageType /= Type ->
+ mnesia:change_table_copy_type(Tab, node(), Type);
+ true -> {atomic, ok}
+ end,
+ ok.
+
+has_copy_type(TabDef, DiscType) ->
+ lists:member(node(), proplists:get_value(DiscType, TabDef, [])).
+
+check_attributes(Tab, TabDef) ->
+ {_, ExpAttrs} = proplists:lookup(attributes, TabDef),
+ case mnesia:table_info(Tab, attributes) of
+ ExpAttrs -> ok;
+ Attrs -> {error, {table_attributes_mismatch, Tab, ExpAttrs, Attrs}}
+ end.
+
+check_content(Tab, TabDef) ->
+ {_, Match} = proplists:lookup(match, TabDef),
+ case mnesia:dirty_first(Tab) of
+ '$end_of_table' ->
+ ok;
+ Key ->
+ ObjList = mnesia:dirty_read(Tab, Key),
+ MatchComp = ets:match_spec_compile([{Match, [], ['$_']}]),
+ case ets:match_spec_run(ObjList, MatchComp) of
+ ObjList -> ok;
+ _ -> {error, {table_content_invalid, Tab, Match, ObjList}}
+ end
+ end.
+
+check(Fun) ->
+ case [Error || {Tab, TabDef} <- definitions(),
+ begin
+ {Ret, Error} = case Fun(Tab, TabDef) of
+ ok -> {false, none};
+ {error, E} -> {true, E}
+ end,
+ Ret
+ end] of
+ [] -> ok;
+ Errors -> {error, Errors}
+ end.
+
+%%--------------------------------------------------------------------
+%% Table definitions
+%%--------------------------------------------------------------------
+
+names() -> [Tab || {Tab, _} <- definitions()].
+
+%% The tables aren't supposed to be on disk on a ram node
+definitions(disc) ->
+ definitions();
+definitions(ram) ->
+ [{Tab, [{disc_copies, []}, {ram_copies, [node()]} |
+ proplists:delete(
+ ram_copies, proplists:delete(disc_copies, TabDef))]} ||
+ {Tab, TabDef} <- definitions()].
+
+definitions() ->
+ [{rabbit_user,
+ [{record_name, internal_user},
+ {attributes, internal_user:fields()},
+ {disc_copies, [node()]},
+ {match, internal_user:pattern_match_all()}]},
+ {rabbit_user_permission,
+ [{record_name, user_permission},
+ {attributes, record_info(fields, user_permission)},
+ {disc_copies, [node()]},
+ {match, #user_permission{user_vhost = #user_vhost{_='_'},
+ permission = #permission{_='_'},
+ _='_'}}]},
+ {rabbit_topic_permission,
+ [{record_name, topic_permission},
+ {attributes, record_info(fields, topic_permission)},
+ {disc_copies, [node()]},
+ {match, #topic_permission{topic_permission_key = #topic_permission_key{_='_'},
+ permission = #permission{_='_'},
+ _='_'}}]},
+ {rabbit_vhost,
+ [
+ {record_name, vhost},
+ {attributes, vhost:fields()},
+ {disc_copies, [node()]},
+ {match, vhost:pattern_match_all()}]},
+ {rabbit_listener,
+ [{record_name, listener},
+ {attributes, record_info(fields, listener)},
+ {type, bag},
+ {match, #listener{_='_'}}]},
+ {rabbit_durable_route,
+ [{record_name, route},
+ {attributes, record_info(fields, route)},
+ {disc_copies, [node()]},
+ {match, #route{binding = binding_match(), _='_'}}]},
+ {rabbit_semi_durable_route,
+ [{record_name, route},
+ {attributes, record_info(fields, route)},
+ {type, ordered_set},
+ {match, #route{binding = binding_match(), _='_'}}]},
+ {rabbit_route,
+ [{record_name, route},
+ {attributes, record_info(fields, route)},
+ {type, ordered_set},
+ {match, #route{binding = binding_match(), _='_'}}]},
+ {rabbit_reverse_route,
+ [{record_name, reverse_route},
+ {attributes, record_info(fields, reverse_route)},
+ {type, ordered_set},
+ {match, #reverse_route{reverse_binding = reverse_binding_match(),
+ _='_'}}]},
+ {rabbit_topic_trie_node,
+ [{record_name, topic_trie_node},
+ {attributes, record_info(fields, topic_trie_node)},
+ {type, ordered_set},
+ {match, #topic_trie_node{trie_node = trie_node_match(), _='_'}}]},
+ {rabbit_topic_trie_edge,
+ [{record_name, topic_trie_edge},
+ {attributes, record_info(fields, topic_trie_edge)},
+ {type, ordered_set},
+ {match, #topic_trie_edge{trie_edge = trie_edge_match(), _='_'}}]},
+ {rabbit_topic_trie_binding,
+ [{record_name, topic_trie_binding},
+ {attributes, record_info(fields, topic_trie_binding)},
+ {type, ordered_set},
+ {match, #topic_trie_binding{trie_binding = trie_binding_match(),
+ _='_'}}]},
+ {rabbit_durable_exchange,
+ [{record_name, exchange},
+ {attributes, record_info(fields, exchange)},
+ {disc_copies, [node()]},
+ {match, #exchange{name = exchange_name_match(), _='_'}}]},
+ {rabbit_exchange,
+ [{record_name, exchange},
+ {attributes, record_info(fields, exchange)},
+ {match, #exchange{name = exchange_name_match(), _='_'}}]},
+ {rabbit_exchange_serial,
+ [{record_name, exchange_serial},
+ {attributes, record_info(fields, exchange_serial)},
+ {match, #exchange_serial{name = exchange_name_match(), _='_'}}]},
+ {rabbit_runtime_parameters,
+ [{record_name, runtime_parameters},
+ {attributes, record_info(fields, runtime_parameters)},
+ {disc_copies, [node()]},
+ {match, #runtime_parameters{_='_'}}]},
+ {rabbit_durable_queue,
+ [{record_name, amqqueue},
+ {attributes, amqqueue:fields()},
+ {disc_copies, [node()]},
+ {match, amqqueue:pattern_match_on_name(queue_name_match())}]},
+ {rabbit_queue,
+ [{record_name, amqqueue},
+ {attributes, amqqueue:fields()},
+ {match, amqqueue:pattern_match_on_name(queue_name_match())}]}
+ ]
+ ++ gm:table_definitions()
+ ++ mirrored_supervisor:table_definitions().
+
+binding_match() ->
+ #binding{source = exchange_name_match(),
+ destination = binding_destination_match(),
+ _='_'}.
+reverse_binding_match() ->
+ #reverse_binding{destination = binding_destination_match(),
+ source = exchange_name_match(),
+ _='_'}.
+binding_destination_match() ->
+ resource_match('_').
+trie_node_match() ->
+ #trie_node{exchange_name = exchange_name_match(), _='_'}.
+trie_edge_match() ->
+ #trie_edge{exchange_name = exchange_name_match(), _='_'}.
+trie_binding_match() ->
+ #trie_binding{exchange_name = exchange_name_match(), _='_'}.
+exchange_name_match() ->
+ resource_match(exchange).
+queue_name_match() ->
+ resource_match(queue).
+resource_match(Kind) ->
+ #resource{kind = Kind, _='_'}.
diff --git a/deps/rabbit/src/rabbit_trace.erl b/deps/rabbit/src/rabbit_trace.erl
new file mode 100644
index 0000000000..74b892330e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_trace.erl
@@ -0,0 +1,128 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_trace).
+
+-export([init/1, enabled/1, tap_in/6, tap_out/5, start/1, stop/1]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-define(TRACE_VHOSTS, trace_vhosts).
+-define(XNAME, <<"amq.rabbitmq.trace">>).
+
+%%----------------------------------------------------------------------------
+
+-type state() :: rabbit_types:exchange() | 'none'.
+
+%%----------------------------------------------------------------------------
+
+-spec init(rabbit_types:vhost()) -> state().
+
+init(VHost) ->
+ case enabled(VHost) of
+ false -> none;
+ true -> {ok, X} = rabbit_exchange:lookup(
+ rabbit_misc:r(VHost, exchange, ?XNAME)),
+ X
+ end.
+
+-spec enabled(rabbit_types:vhost()) -> boolean().
+
+enabled(VHost) ->
+ {ok, VHosts} = application:get_env(rabbit, ?TRACE_VHOSTS),
+ lists:member(VHost, VHosts).
+
+-spec tap_in(rabbit_types:basic_message(), [rabbit_amqqueue:name()],
+ binary(), rabbit_channel:channel_number(),
+ rabbit_types:username(), state()) -> 'ok'.
+
+tap_in(_Msg, _QNames, _ConnName, _ChannelNum, _Username, none) -> ok;
+tap_in(Msg = #basic_message{exchange_name = #resource{name = XName,
+ virtual_host = VHost}},
+ QNames, ConnName, ChannelNum, Username, TraceX) ->
+ trace(TraceX, Msg, <<"publish">>, XName,
+ [{<<"vhost">>, longstr, VHost},
+ {<<"connection">>, longstr, ConnName},
+ {<<"channel">>, signedint, ChannelNum},
+ {<<"user">>, longstr, Username},
+ {<<"routed_queues">>, array,
+ [{longstr, QName#resource.name} || QName <- QNames]}]).
+
+-spec tap_out(rabbit_amqqueue:qmsg(), binary(),
+ rabbit_channel:channel_number(),
+ rabbit_types:username(), state()) -> 'ok'.
+
+tap_out(_Msg, _ConnName, _ChannelNum, _Username, none) -> ok;
+tap_out({#resource{name = QName, virtual_host = VHost},
+ _QPid, _QMsgId, Redelivered, Msg},
+ ConnName, ChannelNum, Username, TraceX) ->
+ RedeliveredNum = case Redelivered of true -> 1; false -> 0 end,
+ trace(TraceX, Msg, <<"deliver">>, QName,
+ [{<<"redelivered">>, signedint, RedeliveredNum},
+ {<<"vhost">>, longstr, VHost},
+ {<<"connection">>, longstr, ConnName},
+ {<<"channel">>, signedint, ChannelNum},
+ {<<"user">>, longstr, Username}]).
+
+%%----------------------------------------------------------------------------
+
+-spec start(rabbit_types:vhost()) -> 'ok'.
+
+start(VHost) ->
+ rabbit_log:info("Enabling tracing for vhost '~s'~n", [VHost]),
+ update_config(fun (VHosts) -> [VHost | VHosts -- [VHost]] end).
+
+-spec stop(rabbit_types:vhost()) -> 'ok'.
+
+stop(VHost) ->
+ rabbit_log:info("Disabling tracing for vhost '~s'~n", [VHost]),
+ update_config(fun (VHosts) -> VHosts -- [VHost] end).
+
+update_config(Fun) ->
+ {ok, VHosts0} = application:get_env(rabbit, ?TRACE_VHOSTS),
+ VHosts = Fun(VHosts0),
+ application:set_env(rabbit, ?TRACE_VHOSTS, VHosts),
+ rabbit_channel:refresh_config_local(),
+ ok.
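+
+%% Tracing is toggled purely through the 'trace_vhosts' application
+%% environment entry; rabbit_channel:refresh_config_local/0 prompts channels
+%% on this node to re-read it, so no per-queue or per-exchange state changes.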
+
+%%----------------------------------------------------------------------------
+
+trace(#exchange{name = Name}, #basic_message{exchange_name = Name},
+ _RKPrefix, _RKSuffix, _Extra) ->
+ ok;
+trace(X, Msg = #basic_message{content = #content{payload_fragments_rev = PFR}},
+ RKPrefix, RKSuffix, Extra) ->
+ ok = rabbit_basic:publish(
+ X, <<RKPrefix/binary, ".", RKSuffix/binary>>,
+ #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, PFR),
+ ok.
+
+msg_to_table(#basic_message{exchange_name = #resource{name = XName},
+ routing_keys = RoutingKeys,
+ content = Content}) ->
+ #content{properties = Props} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ {PropsTable, _Ix} =
+ lists:foldl(fun (K, {L, Ix}) ->
+ V = element(Ix, Props),
+ NewL = case V of
+ undefined -> L;
+ _ -> [{a2b(K), type(V), V} | L]
+ end,
+ {NewL, Ix + 1}
+ end, {[], 2}, record_info(fields, 'P_basic')),
+ [{<<"exchange_name">>, longstr, XName},
+ {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]},
+ {<<"properties">>, table, PropsTable},
+ {<<"node">>, longstr, a2b(node())}].
+
+a2b(A) -> list_to_binary(atom_to_list(A)).
+
+type(V) when is_list(V) -> table;
+type(V) when is_integer(V) -> signedint;
+type(_V) -> longstr.
diff --git a/deps/rabbit/src/rabbit_tracking.erl b/deps/rabbit/src/rabbit_tracking.erl
new file mode 100644
index 0000000000..a124d20226
--- /dev/null
+++ b/deps/rabbit/src/rabbit_tracking.erl
@@ -0,0 +1,103 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracking).
+
+%% Common behaviour and processing functions for tracking components
+%%
+%% See in use:
+%% * rabbit_connection_tracking
+%% * rabbit_channel_tracking
+
+-callback boot() -> ok.
+-callback update_tracked(term()) -> ok.
+-callback handle_cast(term()) -> ok.
+-callback register_tracked(
+ rabbit_types:tracked_connection() |
+ rabbit_types:tracked_channel()) -> 'ok'.
+-callback unregister_tracked(
+ rabbit_types:tracked_connection_id() |
+ rabbit_types:tracked_channel_id()) -> 'ok'.
+-callback count_tracked_items_in(term()) -> non_neg_integer().
+-callback clear_tracking_tables() -> 'ok'.
+-callback shutdown_tracked_items(list(), term()) -> ok.
+
+-export([id/2, count_tracked_items/4, match_tracked_items/2,
+ clear_tracking_table/1, delete_tracking_table/3,
+ delete_tracked_entry/3]).
+
+%%----------------------------------------------------------------------------
+
+-spec id(atom(), term()) ->
+ rabbit_types:tracked_connection_id() | rabbit_types:tracked_channel_id().
+
+id(Node, Name) -> {Node, Name}.
+
+-spec count_tracked_items(function(), integer(), term(), string()) ->
+ non_neg_integer().
+
+count_tracked_items(TableNameFun, CountRecPosition, Key, ContextMsg) ->
+ lists:foldl(fun (Node, Acc) ->
+ Tab = TableNameFun(Node),
+ try
+ N = case mnesia:dirty_read(Tab, Key) of
+ [] -> 0;
+ [Val] ->
+ element(CountRecPosition, Val)
+ end,
+ Acc + N
+ catch _:Err ->
+ rabbit_log:error(
+ "Failed to fetch number of ~p ~p on node ~p:~n~p~n",
+ [ContextMsg, Key, Node, Err]),
+ Acc
+ end
+ end, 0, rabbit_nodes:all_running()).
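+
+%% TableNameFun maps a node to that node's tracking table, CountRecPosition
+%% is the tuple position of the counter field within the stored record, and
+%% ContextMsg merely labels the error logged when a node's table cannot be
+%% read; such nodes contribute 0 to the total.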
+
+-spec match_tracked_items(function(), tuple()) -> term().
+
+match_tracked_items(TableNameFun, MatchSpec) ->
+ lists:foldl(
+ fun (Node, Acc) ->
+ Tab = TableNameFun(Node),
+ Acc ++ mnesia:dirty_match_object(
+ Tab,
+ MatchSpec)
+ end, [], rabbit_nodes:all_running()).
+
+-spec clear_tracking_table(atom()) -> ok.
+
+clear_tracking_table(TableName) ->
+ case mnesia:clear_table(TableName) of
+ {atomic, ok} -> ok;
+ {aborted, _} -> ok
+ end.
+
+-spec delete_tracking_table(atom(), node(), string()) -> ok.
+
+delete_tracking_table(TableName, Node, ContextMsg) ->
+ case mnesia:delete_table(TableName) of
+ {atomic, ok} -> ok;
+ {aborted, {no_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to delete a ~p table for node ~p: ~p",
+ [ContextMsg, Node, Error]),
+ ok
+ end.
+
+-spec delete_tracked_entry({atom(), atom(), list()}, function(), term()) -> ok.
+
+delete_tracked_entry(_ExistsCheckSpec = {M, F, A}, TableNameFun, Key) ->
+ ClusterNodes = rabbit_nodes:all_running(),
+ ExistsInCluster =
+ lists:any(fun(Node) -> rpc:call(Node, M, F, A) end, ClusterNodes),
+ case ExistsInCluster of
+ false ->
+ [mnesia:dirty_delete(TableNameFun(Node), Key) || Node <- ClusterNodes];
+ true ->
+ ok
+ end.
diff --git a/deps/rabbit/src/rabbit_upgrade.erl b/deps/rabbit/src/rabbit_upgrade.erl
new file mode 100644
index 0000000000..b1b128fecc
--- /dev/null
+++ b/deps/rabbit/src/rabbit_upgrade.erl
@@ -0,0 +1,314 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_upgrade).
+
+-export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0,
+ maybe_migrate_queues_to_per_vhost_storage/0,
+ nodes_running/1, secondary_upgrade/1]).
+
+-include("rabbit.hrl").
+
+-define(VERSION_FILENAME, "schema_version").
+-define(LOCK_FILENAME, "schema_upgrade_lock").
+
+%% -------------------------------------------------------------------
+
+%% The upgrade logic is quite involved, due to the existence of
+%% clusters.
+%%
+%% Firstly, we have two different types of upgrades to do: Mnesia and
+%% everything else. Mnesia upgrades must only be done by one node in
+%% the cluster (we treat a non-clustered node as a single-node
+%% cluster). This is the primary upgrader. The other upgrades need to
+%% be done by all nodes.
+%%
+%% The primary upgrader has to start first (and do its Mnesia
+%% upgrades). Secondary upgraders need to reset their Mnesia database
+%% and then rejoin the cluster. They can't do the Mnesia upgrades as
+%% well and then merge databases since the cookie for each table will
+%% end up different and the merge will fail.
+%%
+%% This in turn means that we need to determine whether we are the
+%% primary or secondary upgrader *before* Mnesia comes up. If we
+%% didn't then the secondary upgrader would try to start Mnesia, and
+%% either hang waiting for a node which is not yet up, or fail since
+%% its schema differs from the other nodes in the cluster.
+%%
+%% Also, the primary upgrader needs to start Mnesia to do its
+%% upgrades, but needs to forcibly load tables rather than wait for
+%% them (in case it was not the last node to shut down, in which case
+%% it would wait forever).
+%%
+%% This in turn means that maybe_upgrade_mnesia/0 has to be patched
+%% into the boot process by prelaunch before the mnesia application is
+%% started. By the time Mnesia is started the upgrades have happened
+%% (on the primary), or Mnesia has been reset (on the secondary) and
+%% rabbit_mnesia:init_db_unchecked/2 can then make the node rejoin the cluster
+%% in the normal way.
+%%
+%% The non-mnesia upgrades are then triggered by
+%% rabbit_mnesia:init_db_unchecked/2. Of course, it's possible for a given
+%% upgrade process to only require Mnesia upgrades, or only require
+%% non-Mnesia upgrades. In the latter case no Mnesia resets and
+%% reclusterings occur.
+%%
+%% The primary upgrader needs to be a disc node. Ideally we would like
+%% it to be the last disc node to shut down (since otherwise there's a
+%% risk of data loss). On each node we therefore record the disc nodes
+%% that were still running when we shut down. A disc node that knows
+%% other nodes were up when it shut down, or a ram node, will refuse
+%% to be the primary upgrader, and will thus not start when upgrades
+%% are needed.
+%%
+%% However, this is racy if several nodes are shut down at once. Since
+%% rabbit records the running nodes, and shuts down before mnesia, the
+%% race manifests as all disc nodes thinking they are not the primary
+%% upgrader. Therefore the user can remove the record of the last disc
+%% node to shut down to get things going again. This may lose any
+%% mnesia changes that happened after the node chosen as the primary
+%% upgrader was shut down.
+
+%% -------------------------------------------------------------------
+
+ensure_backup_taken() ->
+ case filelib:is_file(lock_filename()) of
+ false -> case filelib:is_dir(backup_dir()) of
+ false -> ok = take_backup();
+ _ -> ok
+ end;
+ true ->
+ rabbit_log:error("Found lock file at ~s.
+                 Either a previous upgrade is in progress or has failed.
+ Database backup path: ~s",
+ [lock_filename(), backup_dir()]),
+ throw({error, previous_upgrade_failed})
+ end.
+
+take_backup() ->
+ BackupDir = backup_dir(),
+ info("upgrades: Backing up mnesia dir to ~p~n", [BackupDir]),
+ case rabbit_mnesia:copy_db(BackupDir) of
+ ok -> info("upgrades: Mnesia dir backed up to ~p~n",
+ [BackupDir]);
+ {error, E} -> throw({could_not_back_up_mnesia_dir, E, BackupDir})
+ end.
+
+ensure_backup_removed() ->
+ case filelib:is_dir(backup_dir()) of
+ true -> ok = remove_backup();
+ _ -> ok
+ end.
+
+remove_backup() ->
+ ok = rabbit_file:recursive_delete([backup_dir()]),
+ info("upgrades: Mnesia backup removed~n", []).
+
+-spec maybe_upgrade_mnesia() -> 'ok'.
+
+maybe_upgrade_mnesia() ->
+ AllNodes = rabbit_mnesia:cluster_nodes(all),
+ ok = rabbit_mnesia_rename:maybe_finish(AllNodes),
+ %% Mnesia upgrade is the first upgrade scope,
+ %% so we should create a backup here if there are any upgrades
+ case rabbit_version:all_upgrades_required([mnesia, local, message_store]) of
+ {error, starting_from_scratch} ->
+ ok;
+ {error, version_not_available} ->
+ case AllNodes of
+ [] -> die("Cluster upgrade needed but upgrading from "
+ "< 2.1.1.~nUnfortunately you will need to "
+ "rebuild the cluster.", []);
+ _ -> ok
+ end;
+ {error, _} = Err ->
+ throw(Err);
+ {ok, []} ->
+ ok;
+ {ok, Upgrades} ->
+ ensure_backup_taken(),
+ run_mnesia_upgrades(proplists:get_value(mnesia, Upgrades, []),
+ AllNodes)
+ end.
+
+run_mnesia_upgrades([], _) -> ok;
+run_mnesia_upgrades(Upgrades, AllNodes) ->
+ case upgrade_mode(AllNodes) of
+ primary -> primary_upgrade(Upgrades, AllNodes);
+ secondary -> secondary_upgrade(AllNodes)
+ end.
+
+upgrade_mode(AllNodes) ->
+ case nodes_running(AllNodes) of
+ [] ->
+ AfterUs = rabbit_nodes:all_running() -- [node()],
+ case {node_type_legacy(), AfterUs} of
+ {disc, []} ->
+ primary;
+ {disc, _} ->
+ Filename = rabbit_node_monitor:running_nodes_filename(),
+ die("Cluster upgrade needed but other disc nodes shut "
+ "down after this one.~nPlease first start the last "
+ "disc node to shut down.~n~nNote: if several disc "
+ "nodes were shut down simultaneously they may "
+ "all~nshow this message. In which case, remove "
+ "the lock file on one of them and~nstart that node. "
+ "The lock file on this node is:~n~n ~s ", [Filename]);
+ {ram, _} ->
+ die("Cluster upgrade needed but this is a ram node.~n"
+ "Please first start the last disc node to shut down.",
+ [])
+ end;
+ [Another|_] ->
+ MyVersion = rabbit_version:desired_for_scope(mnesia),
+ case rpc:call(Another, rabbit_version, desired_for_scope,
+ [mnesia]) of
+ {badrpc, {'EXIT', {undef, _}}} ->
+ die_because_cluster_upgrade_needed(unknown_old_version,
+ MyVersion);
+ {badrpc, Reason} ->
+ die_because_cluster_upgrade_needed({unknown, Reason},
+ MyVersion);
+ CV -> case rabbit_version:matches(
+ MyVersion, CV) of
+ true -> secondary;
+ false -> die_because_cluster_upgrade_needed(
+ CV, MyVersion)
+ end
+ end
+ end.
+
+-spec die_because_cluster_upgrade_needed(any(), any()) -> no_return().
+
+die_because_cluster_upgrade_needed(ClusterVersion, MyVersion) ->
+ %% The other node(s) are running an
+ %% unexpected version.
+ die("Cluster upgrade needed but other nodes are "
+ "running ~p~nand I want ~p",
+ [ClusterVersion, MyVersion]).
+
+-spec die(string(), list()) -> no_return().
+
+die(Msg, Args) ->
+ %% We don't throw or exit here since that gets thrown
+ %% straight out into do_boot, generating an erl_crash.dump
+ %% and displaying any error message in a confusing way.
+ rabbit_log:error(Msg, Args),
+ Str = rabbit_misc:format(
+ "~n~n****~n~n" ++ Msg ++ "~n~n****~n~n~n", Args),
+ io:format(Str),
+ error_logger:logfile(close),
+ case application:get_env(rabbit, halt_on_upgrade_failure) of
+ {ok, false} -> throw({upgrade_error, Str});
+ _ -> halt(1) %% i.e. true or undefined
+ end.
+
+primary_upgrade(Upgrades, Nodes) ->
+ Others = Nodes -- [node()],
+ ok = apply_upgrades(
+ mnesia,
+ Upgrades,
+ fun () ->
+ rabbit_table:force_load(),
+ case Others of
+ [] -> ok;
+ _ -> info("mnesia upgrades: Breaking cluster~n", []),
+ [{atomic, ok} = mnesia:del_table_copy(schema, Node)
+ || Node <- Others]
+ end
+ end),
+ ok.
+
+secondary_upgrade(AllNodes) ->
+ %% must do this before we wipe out schema
+ NodeType = node_type_legacy(),
+ rabbit_misc:ensure_ok(mnesia:delete_schema([node()]),
+ cannot_delete_schema),
+ rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ ok = rabbit_mnesia:init_db_unchecked(AllNodes, NodeType),
+ ok = rabbit_version:record_desired_for_scope(mnesia),
+ ok.
+
+nodes_running(Nodes) ->
+ [N || N <- Nodes, rabbit:is_running(N)].
+
+%% -------------------------------------------------------------------
+
+-spec maybe_upgrade_local() ->
+ 'ok' |
+ 'version_not_available' |
+ 'starting_from_scratch'.
+
+maybe_upgrade_local() ->
+ case rabbit_version:upgrades_required(local) of
+ {error, version_not_available} -> version_not_available;
+ {error, starting_from_scratch} -> starting_from_scratch;
+ {error, _} = Err -> throw(Err);
+ {ok, []} -> ensure_backup_removed(),
+ ok;
+ {ok, Upgrades} -> mnesia:stop(),
+ ok = apply_upgrades(local, Upgrades,
+ fun () -> ok end),
+ ok
+ end.
+
+%% -------------------------------------------------------------------
+
+maybe_migrate_queues_to_per_vhost_storage() ->
+ Result = case rabbit_version:upgrades_required(message_store) of
+ {error, version_not_available} -> version_not_available;
+ {error, starting_from_scratch} ->
+ starting_from_scratch;
+ {error, _} = Err -> throw(Err);
+ {ok, []} -> ok;
+ {ok, Upgrades} -> apply_upgrades(message_store,
+ Upgrades,
+ fun() -> ok end),
+ ok
+ end,
+ %% Message store upgrades should be
+ %% the last group.
+ %% Backup can be deleted here.
+ ensure_backup_removed(),
+ Result.
+
+%% -------------------------------------------------------------------
+
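+%% Takes the schema upgrade lock file, starts Mnesia, runs the scope-specific
+%% preparation Fun, applies each {Module, Function} upgrade in order, records
+%% the desired version for the scope and finally removes the lock file.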
+apply_upgrades(Scope, Upgrades, Fun) ->
+ ok = rabbit_file:lock_file(lock_filename()),
+ info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]),
+ rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ Fun(),
+ [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades],
+ info("~s upgrades: All upgrades applied successfully~n", [Scope]),
+ ok = rabbit_version:record_desired_for_scope(Scope),
+ ok = file:delete(lock_filename()).
+
+apply_upgrade(Scope, {M, F}) ->
+ info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]),
+ ok = apply(M, F, []).
+
+%% -------------------------------------------------------------------
+
+dir() -> rabbit_mnesia:dir().
+
+lock_filename() -> lock_filename(dir()).
+lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME).
+backup_dir() -> dir() ++ "-upgrade-backup".
+
+node_type_legacy() ->
+ %% This is pretty ugly, but we can't start Mnesia and ask it (that
+ %% would hang), and we can't look at the config file (it may not
+ %% include us even if we're a disc node). We also can't use
+ %% rabbit_mnesia:node_type/0 because that gives false positives on
+ %% Rabbit up to 2.5.1.
+ case filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")) of
+ true -> disc;
+ false -> ram
+ end.
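+%% For illustration only: the same disc-vs-ram heuristic can be checked by
+%% hand from a shell on the node (a sketch, assuming the Mnesia directory
+%% is readable):
+%%
+%%     Dir = rabbit_mnesia:dir(),
+%%     filelib:is_regular(filename:join(Dir, "rabbit_durable_exchange.DCD")).
+%%     %% true for a disc node, false for a ram node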
+
+info(Msg, Args) -> rabbit_log:info(Msg, Args).
diff --git a/deps/rabbit/src/rabbit_upgrade_functions.erl b/deps/rabbit/src/rabbit_upgrade_functions.erl
new file mode 100644
index 0000000000..59417c72bb
--- /dev/null
+++ b/deps/rabbit/src/rabbit_upgrade_functions.erl
@@ -0,0 +1,662 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_upgrade_functions).
+
+%% If you are tempted to add include("rabbit.hrl"). here, don't. Using record
+%% defs here leads to pain later.
+
+-compile([nowarn_export_all, export_all]).
+
+-rabbit_upgrade({remove_user_scope, mnesia, []}).
+-rabbit_upgrade({hash_passwords, mnesia, []}).
+-rabbit_upgrade({add_ip_to_listener, mnesia, []}).
+-rabbit_upgrade({add_opts_to_listener, mnesia, [add_ip_to_listener]}).
+-rabbit_upgrade({internal_exchanges, mnesia, []}).
+-rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}).
+-rabbit_upgrade({topic_trie, mnesia, []}).
+-rabbit_upgrade({semi_durable_route, mnesia, []}).
+-rabbit_upgrade({exchange_event_serial, mnesia, []}).
+-rabbit_upgrade({trace_exchanges, mnesia, [internal_exchanges]}).
+-rabbit_upgrade({user_admin_to_tags, mnesia, [user_to_internal_user]}).
+-rabbit_upgrade({ha_mirrors, mnesia, []}).
+-rabbit_upgrade({gm, mnesia, []}).
+-rabbit_upgrade({exchange_scratch, mnesia, [trace_exchanges]}).
+-rabbit_upgrade({mirrored_supervisor, mnesia, []}).
+-rabbit_upgrade({topic_trie_node, mnesia, []}).
+-rabbit_upgrade({runtime_parameters, mnesia, []}).
+-rabbit_upgrade({exchange_scratches, mnesia, [exchange_scratch]}).
+-rabbit_upgrade({policy, mnesia,
+ [exchange_scratches, ha_mirrors]}).
+-rabbit_upgrade({sync_slave_pids, mnesia, [policy]}).
+-rabbit_upgrade({no_mirror_nodes, mnesia, [sync_slave_pids]}).
+-rabbit_upgrade({gm_pids, mnesia, [no_mirror_nodes]}).
+-rabbit_upgrade({exchange_decorators, mnesia, [policy]}).
+-rabbit_upgrade({policy_apply_to, mnesia, [runtime_parameters]}).
+-rabbit_upgrade({queue_decorators, mnesia, [gm_pids]}).
+-rabbit_upgrade({internal_system_x, mnesia, [exchange_decorators]}).
+-rabbit_upgrade({cluster_name, mnesia, [runtime_parameters]}).
+-rabbit_upgrade({down_slave_nodes, mnesia, [queue_decorators]}).
+-rabbit_upgrade({queue_state, mnesia, [down_slave_nodes]}).
+-rabbit_upgrade({recoverable_slaves, mnesia, [queue_state]}).
+-rabbit_upgrade({policy_version, mnesia, [recoverable_slaves]}).
+-rabbit_upgrade({slave_pids_pending_shutdown, mnesia, [policy_version]}).
+-rabbit_upgrade({user_password_hashing, mnesia, [hash_passwords]}).
+-rabbit_upgrade({operator_policies, mnesia, [slave_pids_pending_shutdown, internal_system_x]}).
+-rabbit_upgrade({vhost_limits, mnesia, []}).
+-rabbit_upgrade({queue_vhost_field, mnesia, [operator_policies]}).
+-rabbit_upgrade({topic_permission, mnesia, []}).
+-rabbit_upgrade({queue_options, mnesia, [queue_vhost_field]}).
+-rabbit_upgrade({exchange_options, mnesia, [operator_policies]}).
+
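+%% Each -rabbit_upgrade({Step, Scope, Requires}) attribute names a 0-arity
+%% function in this module plus the steps it depends on, forming a
+%% dependency graph that determines the order in which steps are applied.
+%% As a sketch only (this step is hypothetical and not part of this
+%% change), a no-op mnesia step that must run after exchange_options
+%% could be declared as:
+%%
+%%     -rabbit_upgrade({exchange_noop_example, mnesia, [exchange_options]}).
+%%
+%%     exchange_noop_example() ->
+%%         transform(rabbit_durable_exchange,
+%%                   fun (X) -> X end, %% record shape left unchanged
+%%                   [name, type, durable, auto_delete, internal, arguments,
+%%                    scratches, policy, operator_policy, decorators, options]).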
+%% -------------------------------------------------------------------
+
+%% Replaces vhost.dummy (used to avoid having a single-field record,
+%% which Mnesia doesn't like) with vhost.limits (which is actually
+%% used).
+
+-spec vhost_limits() -> 'ok'.
+
+vhost_limits() ->
+ transform(
+ rabbit_vhost,
+ fun ({vhost, VHost, _Dummy}) ->
+ {vhost, VHost, undefined}
+ end,
+ [virtual_host, limits]).
+
+%% It's a bad idea to use records or record_info here, even for the
+%% destination form, because in the future the destination form of
+%% your current transform may no longer match the record, and it would
+%% be messy to have to go back and fix old transforms at that point.
+
+-spec remove_user_scope() -> 'ok'.
+
+remove_user_scope() ->
+ transform(
+ rabbit_user_permission,
+ fun ({user_permission, UV, {permission, _Scope, Conf, Write, Read}}) ->
+ {user_permission, UV, {permission, Conf, Write, Read}}
+ end,
+ [user_vhost, permission]).
+
+%% This is an early migration that hashes passwords using MD5; it is
+%% only relevant to those migrating from 2.1.1. All users created in
+%% 3.6.0 or later will use SHA-256 (unless configured otherwise).
+
+-spec hash_passwords() -> 'ok'.
+
+hash_passwords() ->
+ transform(
+ rabbit_user,
+ fun ({user, Username, Password, IsAdmin}) ->
+ Hash = rabbit_auth_backend_internal:hash_password(rabbit_password_hashing_md5, Password),
+ {user, Username, Hash, IsAdmin}
+ end,
+ [username, password_hash, is_admin]).
+
+-spec add_ip_to_listener() -> 'ok'.
+
+add_ip_to_listener() ->
+ transform(
+ rabbit_listener,
+ fun ({listener, Node, Protocol, Host, Port}) ->
+ {listener, Node, Protocol, Host, {0,0,0,0}, Port}
+ end,
+ [node, protocol, host, ip_address, port]).
+
+-spec add_opts_to_listener() -> 'ok'.
+
+add_opts_to_listener() ->
+ transform(
+ rabbit_listener,
+ fun ({listener, Node, Protocol, Host, IP, Port}) ->
+ {listener, Node, Protocol, Host, IP, Port, []}
+ end,
+ [node, protocol, host, ip_address, port, opts]).
+
+-spec internal_exchanges() -> 'ok'.
+
+internal_exchanges() ->
+ Tables = [rabbit_exchange, rabbit_durable_exchange],
+ AddInternalFun =
+ fun ({exchange, Name, Type, Durable, AutoDelete, Args}) ->
+ {exchange, Name, Type, Durable, AutoDelete, false, Args}
+ end,
+ [ ok = transform(T,
+ AddInternalFun,
+ [name, type, durable, auto_delete, internal, arguments])
+ || T <- Tables ],
+ ok.
+
+-spec user_to_internal_user() -> 'ok'.
+
+user_to_internal_user() ->
+ transform(
+ rabbit_user,
+ fun({user, Username, PasswordHash, IsAdmin}) ->
+ {internal_user, Username, PasswordHash, IsAdmin}
+ end,
+ [username, password_hash, is_admin], internal_user).
+
+-spec topic_trie() -> 'ok'.
+
+topic_trie() ->
+ create(rabbit_topic_trie_edge, [{record_name, topic_trie_edge},
+ {attributes, [trie_edge, node_id]},
+ {type, ordered_set}]),
+ create(rabbit_topic_trie_binding, [{record_name, topic_trie_binding},
+ {attributes, [trie_binding, value]},
+ {type, ordered_set}]).
+
+-spec semi_durable_route() -> 'ok'.
+
+semi_durable_route() ->
+ create(rabbit_semi_durable_route, [{record_name, route},
+ {attributes, [binding, value]}]).
+
+-spec exchange_event_serial() -> 'ok'.
+
+exchange_event_serial() ->
+ create(rabbit_exchange_serial, [{record_name, exchange_serial},
+ {attributes, [name, next]}]).
+
+-spec trace_exchanges() -> 'ok'.
+
+trace_exchanges() ->
+ [declare_exchange(
+ rabbit_misc:r(VHost, exchange, <<"amq.rabbitmq.trace">>), topic) ||
+ VHost <- rabbit_vhost:list_names()],
+ ok.
+
+-spec user_admin_to_tags() -> 'ok'.
+
+user_admin_to_tags() ->
+ transform(
+ rabbit_user,
+ fun({internal_user, Username, PasswordHash, true}) ->
+ {internal_user, Username, PasswordHash, [administrator]};
+ ({internal_user, Username, PasswordHash, false}) ->
+ {internal_user, Username, PasswordHash, [management]}
+ end,
+ [username, password_hash, tags], internal_user).
+
+-spec ha_mirrors() -> 'ok'.
+
+ha_mirrors() ->
+ Tables = [rabbit_queue, rabbit_durable_queue],
+ AddMirrorPidsFun =
+ fun ({amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid}) ->
+ {amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid,
+ [], undefined}
+ end,
+ [ ok = transform(T,
+ AddMirrorPidsFun,
+ [name, durable, auto_delete, exclusive_owner, arguments,
+ pid, slave_pids, mirror_nodes])
+ || T <- Tables ],
+ ok.
+
+-spec gm() -> 'ok'.
+
+gm() ->
+ create(gm_group, [{record_name, gm_group},
+ {attributes, [name, version, members]}]).
+
+-spec exchange_scratch() -> 'ok'.
+
+exchange_scratch() ->
+ ok = exchange_scratch(rabbit_exchange),
+ ok = exchange_scratch(rabbit_durable_exchange).
+
+exchange_scratch(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type, Dur, AutoDel, Int, Args}) ->
+ {exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratch]).
+
+-spec mirrored_supervisor() -> 'ok'.
+
+mirrored_supervisor() ->
+ create(mirrored_sup_childspec,
+ [{record_name, mirrored_sup_childspec},
+ {attributes, [key, mirroring_pid, childspec]}]).
+
+-spec topic_trie_node() -> 'ok'.
+
+topic_trie_node() ->
+ create(rabbit_topic_trie_node,
+ [{record_name, topic_trie_node},
+ {attributes, [trie_node, edge_count, binding_count]},
+ {type, ordered_set}]).
+
+-spec runtime_parameters() -> 'ok'.
+
+runtime_parameters() ->
+ create(rabbit_runtime_parameters,
+ [{record_name, runtime_parameters},
+ {attributes, [key, value]},
+ {disc_copies, [node()]}]).
+
+exchange_scratches() ->
+ ok = exchange_scratches(rabbit_exchange),
+ ok = exchange_scratches(rabbit_durable_exchange).
+
+exchange_scratches(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type = <<"x-federation">>, Dur, AutoDel, Int, Args,
+ Scratch}) ->
+ Scratches = orddict:store(federation, Scratch, orddict:new()),
+ {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches};
+ %% We assert here that nothing else uses the scratch mechanism ATM
+ ({exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}) ->
+ {exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches]).
+
+-spec policy() -> 'ok'.
+
+policy() ->
+ ok = exchange_policy(rabbit_exchange),
+ ok = exchange_policy(rabbit_durable_exchange),
+ ok = queue_policy(rabbit_queue),
+ ok = queue_policy(rabbit_durable_queue).
+
+exchange_policy(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches}) ->
+ {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches,
+ undefined}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches,
+ policy]).
+
+queue_policy(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Dur, AutoDel, Excl, Args, Pid, SPids, MNodes}) ->
+ {amqqueue, Name, Dur, AutoDel, Excl, Args, Pid, SPids, MNodes,
+ undefined}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid,
+ slave_pids, mirror_nodes, policy]).
+
+-spec sync_slave_pids() -> 'ok'.
+
+sync_slave_pids() ->
+ Tables = [rabbit_queue, rabbit_durable_queue],
+ AddSyncSlavesFun =
+ fun ({amqqueue, N, D, AD, Excl, Args, Pid, SPids, MNodes, Pol}) ->
+ {amqqueue, N, D, AD, Excl, Args, Pid, SPids, [], MNodes, Pol}
+ end,
+ [ok = transform(T, AddSyncSlavesFun,
+ [name, durable, auto_delete, exclusive_owner, arguments,
+ pid, slave_pids, sync_slave_pids, mirror_nodes, policy])
+ || T <- Tables],
+ ok.
+
+-spec no_mirror_nodes() -> 'ok'.
+
+no_mirror_nodes() ->
+ Tables = [rabbit_queue, rabbit_durable_queue],
+ RemoveMirrorNodesFun =
+ fun ({amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, _MNodes, Pol}) ->
+ {amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol}
+ end,
+ [ok = transform(T, RemoveMirrorNodesFun,
+ [name, durable, auto_delete, exclusive_owner, arguments,
+ pid, slave_pids, sync_slave_pids, policy])
+ || T <- Tables],
+ ok.
+
+-spec gm_pids() -> 'ok'.
+
+gm_pids() ->
+ Tables = [rabbit_queue, rabbit_durable_queue],
+ AddGMPidsFun =
+ fun ({amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol}) ->
+ {amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol, []}
+ end,
+ [ok = transform(T, AddGMPidsFun,
+ [name, durable, auto_delete, exclusive_owner, arguments,
+ pid, slave_pids, sync_slave_pids, policy, gm_pids])
+ || T <- Tables],
+ ok.
+
+-spec exchange_decorators() -> 'ok'.
+
+exchange_decorators() ->
+ ok = exchange_decorators(rabbit_exchange),
+ ok = exchange_decorators(rabbit_durable_exchange).
+
+exchange_decorators(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches,
+ Policy}) ->
+ {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches, Policy,
+ {[], []}}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches, policy,
+ decorators]).
+
+-spec policy_apply_to() -> 'ok'.
+
+policy_apply_to() ->
+ transform(
+ rabbit_runtime_parameters,
+ fun ({runtime_parameters, Key = {_VHost, <<"policy">>, _Name}, Value}) ->
+ ApplyTo = apply_to(proplists:get_value(<<"definition">>, Value)),
+ {runtime_parameters, Key, [{<<"apply-to">>, ApplyTo} | Value]};
+ ({runtime_parameters, Key, Value}) ->
+ {runtime_parameters, Key, Value}
+ end,
+ [key, value]),
+ rabbit_policy:invalidate(),
+ ok.
+
+apply_to(Def) ->
+ case [proplists:get_value(K, Def) ||
+ K <- [<<"federation-upstream-set">>, <<"ha-mode">>]] of
+ [undefined, undefined] -> <<"all">>;
+ [_, undefined] -> <<"exchanges">>;
+ [undefined, _] -> <<"queues">>;
+ [_, _] -> <<"all">>
+ end.
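+%% For example: a definition containing only <<"ha-mode">> maps to
+%% <<"queues">>, one containing only <<"federation-upstream-set">> maps to
+%% <<"exchanges">>, and a definition with neither (or both) keys maps to
+%% <<"all">>.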
+
+-spec queue_decorators() -> 'ok'.
+
+queue_decorators() ->
+ ok = queue_decorators(rabbit_queue),
+ ok = queue_decorators(rabbit_durable_queue).
+
+queue_decorators(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, Policy, GmPids}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, Policy, GmPids, []}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, policy, gm_pids, decorators]).
+
+-spec internal_system_x() -> 'ok'.
+
+internal_system_x() ->
+ transform(
+ rabbit_durable_exchange,
+ fun ({exchange, Name = {resource, _, _, <<"amq.rabbitmq.", _/binary>>},
+ Type, Dur, AutoDel, _Int, Args, Scratches, Policy, Decorators}) ->
+ {exchange, Name, Type, Dur, AutoDel, true, Args, Scratches,
+ Policy, Decorators};
+ (X) ->
+ X
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches, policy,
+ decorators]).
+
+-spec cluster_name() -> 'ok'.
+
+cluster_name() ->
+ {atomic, ok} = mnesia:transaction(fun cluster_name_tx/0),
+ ok.
+
+cluster_name_tx() ->
+ %% mnesia:transform_table/4 does not let us delete records
+ T = rabbit_runtime_parameters,
+ mnesia:write_lock_table(T),
+ Ks = [K || {_VHost, <<"federation">>, <<"local-nodename">>} = K
+ <- mnesia:all_keys(T)],
+ case Ks of
+ [] -> ok;
+ [K|Tl] -> [{runtime_parameters, _K, Name}] = mnesia:read(T, K, write),
+ R = {runtime_parameters, cluster_name, Name},
+ mnesia:write(T, R, write),
+ case Tl of
+ [] -> ok;
+ _ -> {VHost, _, _} = K,
+ error_logger:warning_msg(
+ "Multiple local-nodenames found, picking '~s' "
+ "from '~s' for cluster name~n", [Name, VHost])
+ end
+ end,
+ [mnesia:delete(T, K, write) || K <- Ks],
+ ok.
+
+-spec down_slave_nodes() -> 'ok'.
+
+down_slave_nodes() ->
+ ok = down_slave_nodes(rabbit_queue),
+ ok = down_slave_nodes(rabbit_durable_queue).
+
+down_slave_nodes(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, Policy, GmPids, Decorators}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, [], Policy, GmPids, Decorators}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, down_slave_nodes, policy, gm_pids, decorators]).
+
+-spec queue_state() -> 'ok'.
+
+queue_state() ->
+ ok = queue_state(rabbit_queue),
+ ok = queue_state(rabbit_durable_queue).
+
+queue_state(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ live}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, down_slave_nodes, policy, gm_pids, decorators, state]).
+
+-spec recoverable_slaves() -> 'ok'.
+
+recoverable_slaves() ->
+ ok = recoverable_slaves(rabbit_queue),
+ ok = recoverable_slaves(rabbit_durable_queue).
+
+recoverable_slaves(Table) ->
+ transform(
+ Table, fun (Q) -> Q end, %% Don't change shape of record
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators,
+ state]).
+
+policy_version() ->
+ ok = policy_version(rabbit_queue),
+ ok = policy_version(rabbit_durable_queue).
+
+policy_version(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State, 0}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators, state,
+ policy_version]).
+
+slave_pids_pending_shutdown() ->
+ ok = slave_pids_pending_shutdown(rabbit_queue),
+ ok = slave_pids_pending_shutdown(rabbit_durable_queue).
+
+slave_pids_pending_shutdown(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State, PolicyVersion}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State, PolicyVersion, []}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators, state,
+ policy_version, slave_pids_pending_shutdown]).
+
+-spec operator_policies() -> 'ok'.
+
+operator_policies() ->
+ ok = exchange_operator_policies(rabbit_exchange),
+ ok = exchange_operator_policies(rabbit_durable_exchange),
+ ok = queue_operator_policies(rabbit_queue),
+ ok = queue_operator_policies(rabbit_durable_queue).
+
+exchange_operator_policies(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type, Dur, AutoDel, Internal,
+ Args, Scratches, Policy, Decorators}) ->
+ {exchange, Name, Type, Dur, AutoDel, Internal,
+ Args, Scratches, Policy, undefined, Decorators}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches, policy,
+ operator_policy, decorators]).
+
+queue_operator_policies(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State, PolicyVersion, SlavePidsPendingShutdown}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, undefined, GmPids,
+ Decorators, State, PolicyVersion, SlavePidsPendingShutdown}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, operator_policy,
+ gm_pids, decorators, state, policy_version, slave_pids_pending_shutdown]).
+
+-spec queue_vhost_field() -> 'ok'.
+
+queue_vhost_field() ->
+ ok = queue_vhost_field(rabbit_queue),
+ ok = queue_vhost_field(rabbit_durable_queue),
+ {atomic, ok} = mnesia:add_table_index(rabbit_queue, vhost),
+ {atomic, ok} = mnesia:add_table_index(rabbit_durable_queue, vhost),
+ ok.
+
+queue_vhost_field(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name = {resource, VHost, queue, _QName}, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, OperatorPolicy, GmPids, Decorators,
+ State, PolicyVersion, SlavePidsPendingShutdown}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, OperatorPolicy, GmPids, Decorators,
+ State, PolicyVersion, SlavePidsPendingShutdown, VHost}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, operator_policy,
+ gm_pids, decorators, state, policy_version, slave_pids_pending_shutdown, vhost]).
+
+-spec queue_options() -> 'ok'.
+
+queue_options() ->
+ ok = queue_options(rabbit_queue),
+ ok = queue_options(rabbit_durable_queue),
+ ok.
+
+queue_options(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, OperatorPolicy, GmPids, Decorators,
+ State, PolicyVersion, SlavePidsPendingShutdown, VHost}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, OperatorPolicy, GmPids, Decorators,
+ State, PolicyVersion, SlavePidsPendingShutdown, VHost, #{}}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, operator_policy,
+ gm_pids, decorators, state, policy_version, slave_pids_pending_shutdown, vhost, options]).
+
+%% Prior to 3.6.0, passwords were hashed using MD5; this populates
+%% existing records with that default. Users created with 3.6.0+ will
+%% have internal_user.hashing_algorithm populated by the internal
+%% authn backend.
+
+-spec user_password_hashing() -> 'ok'.
+
+user_password_hashing() ->
+ transform(
+ rabbit_user,
+ fun ({internal_user, Username, Hash, Tags}) ->
+ {internal_user, Username, Hash, Tags, rabbit_password_hashing_md5}
+ end,
+ [username, password_hash, tags, hashing_algorithm]).
+
+-spec topic_permission() -> 'ok'.
+topic_permission() ->
+ create(rabbit_topic_permission,
+ [{record_name, topic_permission},
+ {attributes, [topic_permission_key, permission]},
+ {disc_copies, [node()]}]).
+
+-spec exchange_options() -> 'ok'.
+
+exchange_options() ->
+ ok = exchange_options(rabbit_exchange),
+ ok = exchange_options(rabbit_durable_exchange).
+
+exchange_options(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type, Dur, AutoDel, Internal,
+ Args, Scratches, Policy, OperatorPolicy, Decorators}) ->
+ {exchange, Name, Type, Dur, AutoDel, Internal,
+ Args, Scratches, Policy, OperatorPolicy, Decorators, #{}}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches, policy,
+ operator_policy, decorators, options]).
+
+%%--------------------------------------------------------------------
+
+transform(TableName, Fun, FieldList) ->
+ rabbit_table:wait([TableName]),
+ {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList),
+ ok.
+
+transform(TableName, Fun, FieldList, NewRecordName) ->
+ rabbit_table:wait([TableName]),
+ {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList,
+ NewRecordName),
+ ok.
+
+create(Tab, TabDef) ->
+ rabbit_log:debug("Will create a schema table named '~s'", [Tab]),
+ {atomic, ok} = mnesia:create_table(Tab, TabDef),
+ ok.
+
+%% Dumb replacement for rabbit_exchange:declare that does not require
+%% the exchange type registry or worker pool to be running, by dint of
+%% not validating anything and assuming the exchange type does not
+%% require serialisation. NB: this assumes the
+%% pre-exchange-scratch-space record format.
+declare_exchange(XName, Type) ->
+ X = {exchange, XName, Type, true, false, false, []},
+ ok = mnesia:dirty_write(rabbit_durable_exchange, X).
diff --git a/deps/rabbit/src/rabbit_upgrade_preparation.erl b/deps/rabbit/src/rabbit_upgrade_preparation.erl
new file mode 100644
index 0000000000..fc1de24610
--- /dev/null
+++ b/deps/rabbit/src/rabbit_upgrade_preparation.erl
@@ -0,0 +1,51 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_upgrade_preparation).
+
+-export([await_online_quorum_plus_one/1, await_online_synchronised_mirrors/1]).
+
+%%
+%% API
+%%
+
+-define(SAMPLING_INTERVAL, 200).
+
+await_online_quorum_plus_one(Timeout) ->
+ Iterations = ceil(Timeout / ?SAMPLING_INTERVAL),
+ do_await_safe_online_quorum(Iterations).
+
+
+await_online_synchronised_mirrors(Timeout) ->
+ Iterations = ceil(Timeout / ?SAMPLING_INTERVAL),
+ do_await_online_synchronised_mirrors(Iterations).
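+%% Both helpers poll every ?SAMPLING_INTERVAL (200) milliseconds. As a
+%% worked example, a 60000 ms timeout yields ceil(60000 / 200) = 300
+%% polling iterations before giving up and returning false.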
+
+
+%%
+%% Implementation
+%%
+
+do_await_safe_online_quorum(0) ->
+ false;
+do_await_safe_online_quorum(IterationsLeft) ->
+ case rabbit_quorum_queue:list_with_minimum_quorum() of
+ [] -> true;
+ List when is_list(List) ->
+ timer:sleep(?SAMPLING_INTERVAL),
+ do_await_safe_online_quorum(IterationsLeft - 1)
+ end.
+
+
+do_await_online_synchronised_mirrors(0) ->
+ false;
+do_await_online_synchronised_mirrors(IterationsLeft) ->
+ case rabbit_amqqueue:list_local_mirrored_classic_without_synchronised_mirrors() of
+ [] -> true;
+ List when is_list(List) ->
+ timer:sleep(?SAMPLING_INTERVAL),
+ do_await_online_synchronised_mirrors(IterationsLeft - 1)
+ end.
diff --git a/deps/rabbit/src/rabbit_variable_queue.erl b/deps/rabbit/src/rabbit_variable_queue.erl
new file mode 100644
index 0000000000..cf6fa4a189
--- /dev/null
+++ b/deps/rabbit/src/rabbit_variable_queue.erl
@@ -0,0 +1,3015 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_variable_queue).
+
+-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
+ purge/1, purge_acks/1,
+ publish/6, publish_delivered/5,
+ batch_publish/4, batch_publish_delivered/4,
+ discard/4, drain_confirmed/1,
+ dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
+ ackfold/4, fold/3, len/1, is_empty/1, depth/1,
+ set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
+ handle_pre_hibernate/1, resume/1, msg_rates/1,
+ info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ zip_msgs_and_acks/4, multiple_routing_keys/0, handle_info/2]).
+
+-export([start/2, stop/1]).
+
+%% exported for testing only
+-export([start_msg_store/3, stop_msg_store/1, init/6]).
+
+-export([move_messages_to_vhost_store/0]).
+
+-export([migrate_queue/3, migrate_message/3, get_per_vhost_store_client/2,
+ get_global_store_client/1, log_upgrade_verbose/1,
+ log_upgrade_verbose/2]).
+
+-include_lib("stdlib/include/qlc.hrl").
+
+-define(QUEUE_MIGRATION_BATCH_SIZE, 100).
+-define(EMPTY_START_FUN_STATE, {fun (ok) -> finished end, ok}).
+
+%%----------------------------------------------------------------------------
+%% Messages, and their position in the queue, can be in memory or on
+%% disk, or both. Persistent messages will have both message and
+%% position pushed to disk as soon as they arrive; transient messages
+%% can be written to disk (and thus both types can be evicted from
+%% memory) under memory pressure. The question of whether a message is
+%% in RAM and whether it is persistent are orthogonal.
+%%
+%% Messages are persisted using the queue index and the message
+%% store. Normally the queue index holds the position of the message
+%% *within this queue* along with a couple of small bits of metadata,
+%% while the message store holds the message itself (including headers
+%% and other properties).
+%%
+%% However, as an optimisation, small messages can be embedded
+%% directly in the queue index and bypass the message store
+%% altogether.
+%%
+%% Definitions:
+%%
+%% alpha: this is a message where both the message itself, and its
+%% position within the queue are held in RAM
+%%
+%% beta: this is a message where the message itself is only held on
+%% disk (if persisted to the message store) but its position
+%% within the queue is held in RAM.
+%%
+%% gamma: this is a message where the message itself is only held on
+%% disk, but its position is both in RAM and on disk.
+%%
+%% delta: this is a collection of messages, represented by a single
+%% term, where the messages and their position are only held on
+%% disk.
+%%
+%% Note that for persistent messages, the message and its position
+%% within the queue are always held on disk, *in addition* to being in
+%% one of the above classifications.
+%%
+%% Also note that within this code the term gamma seldom appears;
+%% in practice, gammas are simply betas that have had their queue
+%% position recorded on disk.
+%%
+%% In general, messages move q1 -> q2 -> delta -> q3 -> q4, though
+%% many of these steps are frequently skipped. q1 and q4 only hold
+%% alphas, q2 and q3 hold both betas and gammas. When a message
+%% arrives, its classification is determined. It is then added to the
+%% rightmost appropriate queue.
+%%
+%% If a new message is determined to be a beta or gamma, q1 is
+%% empty. If a new message is determined to be a delta, q1 and q2 are
+%% empty (and actually q4 too).
+%%
+%% When removing messages from a queue, if q4 is empty then q3 is read
+%% directly. If q3 becomes empty then the next segment's worth of
+%% messages from delta are read into q3, reducing the size of
+%% delta. If the queue is non-empty, either q4 or q3 contains
+%% entries. It is never permitted for delta to hold all the messages
+%% in the queue.
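+%% As an illustrative sketch (comments only, not part of this change), the
+%% classification can be read as a function of where the payload and the
+%% queue position live:
+%%
+%%     classify(MsgInRam, PosInRam, PosOnDisk) ->
+%%         case {MsgInRam, PosInRam, PosOnDisk} of
+%%             {true,  true,  _    } -> alpha;
+%%             {false, true,  false} -> beta;
+%%             {false, true,  true } -> gamma;
+%%             {false, false, true } -> delta
+%%         end.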
+%%
+%% The duration indicated to us by the memory_monitor is used to
+%% calculate, given our current ingress and egress rates, how many
+%% messages we should hold in RAM (i.e. as alphas). We track the
+%% ingress and egress rates for both messages and pending acks and
+%% rates for both are considered when calculating the number of
+%% messages to hold in RAM. When we need to push alphas to betas or
+%% betas to gammas, we favour writing out messages that are further
+%% from the head of the queue. This minimises writes to disk, as the
+%% messages closer to the tail of the queue stay in the queue for
+%% longer, thus do not need to be replaced as quickly by sending other
+%% messages to disk.
+%%
+%% Whilst messages are pushed to disk and forgotten from RAM as soon
+%% as requested by a new setting of the queue RAM duration, the
+%% inverse is not true: we only load messages back into RAM on demand,
+%% as the queue is read from. Thus only publishes to the
+%% queue will take up available spare capacity.
+%%
+%% When we report our duration to the memory monitor, we calculate
+%% average ingress and egress rates over the last two samples, and
+%% then calculate our duration based on the sum of the ingress and
+%% egress rates. More than two samples could be used, but it's a
+%% balance between responding quickly enough to changes in
+%% producers/consumers versus ignoring temporary blips. The problem
+%% with temporary blips is that with just a few queues, they can have
+%% substantial impact on the calculation of the average duration and
+%% hence cause unnecessary I/O. Another alternative is to increase the
+%% amqqueue_process:RAM_DURATION_UPDATE_PERIOD to beyond 5
+%% seconds. However, that then runs the risk of being too slow to
+%% inform the memory monitor of changes. Thus a 5 second interval,
+%% plus a rolling average over the last two samples seems to work
+%% well in practice.
+%%
+%% The sum of the ingress and egress rates is used because the egress
+%% rate alone is not sufficient. Adding in the ingress rate means that
+%% queues which are being flooded by messages are given more memory,
+%% resulting in them being able to process the messages faster (by
+%% doing less I/O, or at least deferring it) and thus helping keep
+%% their mailboxes empty and thus the queue as a whole is more
+%% responsive. If such a queue also has fast but previously idle
+%% consumers, the consumer can then start to be driven as fast as it
+%% can go, whereas if only egress rate was being used, the incoming
+%% messages may have to be written to disk and then read back in,
+%% resulting in the hard disk being a bottleneck in driving the
+%% consumers. Generally, we want to give Rabbit every chance of
+%% getting rid of messages as fast as possible and remaining
+%% responsive, and using only the egress rate impacts that goal.
+%%
+%% Once the queue has more alphas than the target_ram_count, the
+%% surplus must be converted to betas, if not gammas, if not rolled
+%% into delta. The conditions under which these transitions occur
+%% reflect the conflicting goals of minimising RAM cost per msg, and
+%% minimising CPU cost per msg. Once the msg has become a beta, its
+%% payload is no longer in RAM, thus a read from the msg_store must
+%% occur before the msg can be delivered, but the RAM cost of a beta
+%% is the same as a gamma, so converting a beta to gamma will not free
+%% up any further RAM. To reduce the RAM cost further, the gamma must
+%% be rolled into delta. Whilst recovering a beta or a gamma to an
+%% alpha requires only one disk read (from the msg_store), recovering
+%% a msg from within delta will require two reads (queue_index and
+%% then msg_store). But delta has a near-0 per-msg RAM cost. So the
+%% conflict is between using delta more, which will free up more
+%% memory, but require additional CPU and disk ops, versus using delta
+%% less and gammas and betas more, which will cost more memory, but
+%% require fewer disk ops and less CPU overhead.
+%%
+%% In the case of a persistent msg published to a durable queue, the
+%% msg is immediately written to the msg_store and queue_index. If
+%% then additionally converted from an alpha, it'll immediately go to
+%% a gamma (as it's already in queue_index), and cannot exist as a
+%% beta. Thus a durable queue with a mixture of persistent and
+%% transient msgs in it which has more messages than permitted by the
+%% target_ram_count may contain an interspersed mixture of betas and
+%% gammas in q2 and q3.
+%%
+%% There is then a ratio that controls how many betas and gammas there
+%% can be. This is based on the target_ram_count and thus expresses
+%% the fact that as the number of permitted alphas in the queue falls,
+%% so should the number of betas and gammas fall (i.e. delta
+%% grows). If q2 and q3 contain more than the permitted number of
+%% betas and gammas, then the surplus are forcibly converted to gammas
+%% (as necessary) and then rolled into delta. The ratio is that
+%% delta/(betas+gammas+delta) equals
+%% (betas+gammas+delta)/(target_ram_count+betas+gammas+delta). I.e. as
+%% the target_ram_count shrinks to 0, so must betas and gammas.
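+%% Rearranging that ratio (a worked sketch of the algebra, not code): with
+%% S = betas + gammas + delta and T = target_ram_count, delta/S = S/(T+S)
+%% gives delta = S*S/(T+S), so the permitted number of betas and gammas is
+%% S - delta = S*T/(T+S). For example, T = 1000 and S = 3000 permit
+%% 3000*1000/4000 = 750 betas and gammas; as T approaches 0 so does the
+%% permitted count, i.e. everything beyond the alphas rolls into delta.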
+%%
+%% The conversion of betas to deltas is done if there are at least
+%% ?IO_BATCH_SIZE betas in q2 & q3. This value should not be too small,
+%% otherwise the frequent operations on the queues of q2 and q3 will not be
+%% effectively amortised (switching the direction of queue access defeats
+%% amortisation). Note that there is a natural upper bound due to credit_flow
+%% limits on the alpha to beta conversion.
+%%
+%% The conversion from alphas to betas is chunked due to the
+%% credit_flow limits of the msg_store. This further smooths the
+%% effects of changes to the target_ram_count and ensures the queue
+%% remains responsive even when there is a large amount of IO work to
+%% do. The 'resume' callback is utilised to ensure that conversions
+%% are done as promptly as possible whilst ensuring the queue remains
+%% responsive.
+%%
+%% In the queue we keep track of both messages that are pending
+%% delivery and messages that are pending acks. In the event of a
+%% queue purge, we only need to load qi segments if the queue has
+%% elements in deltas (i.e. it came under significant memory
+%% pressure). In the event of a queue deletion, in addition to the
+%% preceding, by keeping track of pending acks in RAM, we do not need
+%% to search through qi segments looking for messages that are yet to
+%% be acknowledged.
+%%
+%% Pending acks are recorded in memory by storing the message itself.
+%% If the message has been sent to disk, we do not store the message
+%% content. During memory reduction, pending acks containing message
+%% content have that content removed and the corresponding messages
+%% are pushed out to disk.
+%%
+%% Messages from pending acks are returned to q4, q3 and delta during
+%% requeue, based on the limits of seq_id contained in each. Requeued
+%% messages retain their original seq_id, maintaining order
+%% when requeued.
+%%
+%% The order in which alphas are pushed to betas and pending acks
+%% are pushed to disk is determined dynamically. We always prefer to
+%% push messages for the source (alphas or acks) that is growing the
+%% fastest (with growth measured as avg. ingress - avg. egress).
+%%
+%% Notes on Clean Shutdown
+%% (This documents behaviour in variable_queue, queue_index and
+%% msg_store.)
+%%
+%% In order to try to achieve as fast a start-up as possible, if a
+%% clean shutdown occurs, we try to save out state to disk to reduce
+%% work on startup. In the msg_store this takes the form of the
+%% index_module's state, plus the file_summary ets table, and client
+%% refs. In the VQ, this takes the form of the count of persistent
+%% messages in the queue and references into the msg_stores. The
+%% queue_index adds to these terms the details of its segments and
+%% stores the terms in the queue directory.
+%%
+%% Two message stores are used. One is created for persistent messages
+%% to durable queues that must survive restarts, and the other is used
+%% for all other messages that just happen to need to be written to
+%% disk. On start up we can therefore nuke the transient message
+%% store, and be sure that the messages in the persistent store are
+%% all that we need.
+%%
+%% The references to the msg_stores are there so that the msg_store
+%% knows to only trust its saved state if all of the queues it was
+%% previously talking to come up cleanly. Likewise, the queues
+%% themselves (esp queue_index) skips work in init if all the queues
+%% and msg_store were shutdown cleanly. This gives both good speed
+%% improvements and also robustness so that if anything possibly went
+%% wrong in shutdown (or there was subsequent manual tampering), all
+%% messages and queues that can be recovered are recovered, safely.
+%%
+%% To delete transient messages lazily, the variable_queue, on
+%% startup, stores the next_seq_id reported by the queue_index as the
+%% transient_threshold. From that point on, whenever it's reading a
+%% message off disk via the queue_index, if the seq_id is below this
+%% threshold and the message is transient then it drops the message
+%% (the message itself won't exist on disk because it would have been
+%% stored in the transient msg_store which would have had its saved
+%% state nuked on startup). This avoids the expensive operation of
+%% scanning the entire queue on startup in order to delete transient
+%% messages that were only pushed to disk to save memory.
+%%
+%%----------------------------------------------------------------------------
+
+-behaviour(rabbit_backing_queue).
+
+-record(vqstate,
+ { q1,
+ q2,
+ delta,
+ q3,
+ q4,
+ next_seq_id,
+ ram_pending_ack, %% msgs using store, still in RAM
+ disk_pending_ack, %% msgs in store, paged out
+ qi_pending_ack, %% msgs using qi, *can't* be paged out
+ index_state,
+ msg_store_clients,
+ durable,
+ transient_threshold,
+ qi_embed_msgs_below,
+
+ len, %% w/o unacked
+ bytes, %% w/o unacked
+ unacked_bytes,
+ persistent_count, %% w unacked
+ persistent_bytes, %% w unacked
+ delta_transient_bytes, %%
+
+ target_ram_count,
+ ram_msg_count, %% w/o unacked
+ ram_msg_count_prev,
+ ram_ack_count_prev,
+ ram_bytes, %% w unacked
+ out_counter,
+ in_counter,
+ rates,
+ msgs_on_disk,
+ msg_indices_on_disk,
+ unconfirmed,
+ confirmed,
+ ack_out_counter,
+ ack_in_counter,
+ %% Unlike the other counters these two do not feed into
+ %% #rates{} and get reset
+ disk_read_count,
+ disk_write_count,
+
+ io_batch_size,
+
+ %% default queue or lazy queue
+ mode,
+ %% number of reduce_memory_use executions; once it
+ %% reaches a threshold the queue will manually trigger a runtime GC
+ %% (see: maybe_execute_gc/1)
+ memory_reduction_run_count,
+ %% Queue data is grouped by vhost. We need to store the vhost
+ %% here in order to work with the queue index.
+ virtual_host,
+ waiting_bump = false
+ }).
+
+-record(rates, { in, out, ack_in, ack_out, timestamp }).
+
+-record(msg_status,
+ { seq_id,
+ msg_id,
+ msg,
+ is_persistent,
+ is_delivered,
+ msg_in_store,
+ index_on_disk,
+ persist_to,
+ msg_props
+ }).
+
+-record(delta,
+ { start_seq_id, %% start_seq_id is inclusive
+ count,
+ transient,
+ end_seq_id %% end_seq_id is exclusive
+ }).
+
+-define(HEADER_GUESS_SIZE, 100). %% see determine_persist_to/2
+-define(PERSISTENT_MSG_STORE, msg_store_persistent).
+-define(TRANSIENT_MSG_STORE, msg_store_transient).
+
+-define(QUEUE, lqueue).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("amqqueue.hrl").
+
+%%----------------------------------------------------------------------------
+
+-rabbit_upgrade({multiple_routing_keys, local, []}).
+-rabbit_upgrade({move_messages_to_vhost_store, message_store, []}).
+
+-type seq_id() :: non_neg_integer().
+
+-type rates() :: #rates { in :: float(),
+ out :: float(),
+ ack_in :: float(),
+ ack_out :: float(),
+ timestamp :: rabbit_types:timestamp()}.
+
+-type delta() :: #delta { start_seq_id :: non_neg_integer(),
+ count :: non_neg_integer(),
+ end_seq_id :: non_neg_integer() }.
+
+%% The compiler (rightfully) complains that ack() and state() are
+%% unused. For this reason we duplicate a -spec from
+%% rabbit_backing_queue with the only intent being to remove
+%% warnings. The problem here is that we can't parameterise the BQ
+%% behaviour by these two types as we would like to. We still leave
+%% these here for documentation purposes.
+-type ack() :: seq_id().
+-type state() :: #vqstate {
+ q1 :: ?QUEUE:?QUEUE(),
+ q2 :: ?QUEUE:?QUEUE(),
+ delta :: delta(),
+ q3 :: ?QUEUE:?QUEUE(),
+ q4 :: ?QUEUE:?QUEUE(),
+ next_seq_id :: seq_id(),
+ ram_pending_ack :: gb_trees:tree(),
+ disk_pending_ack :: gb_trees:tree(),
+ qi_pending_ack :: gb_trees:tree(),
+ index_state :: any(),
+ msg_store_clients :: 'undefined' | {{any(), binary()},
+ {any(), binary()}},
+ durable :: boolean(),
+ transient_threshold :: non_neg_integer(),
+ qi_embed_msgs_below :: non_neg_integer(),
+
+ len :: non_neg_integer(),
+ bytes :: non_neg_integer(),
+ unacked_bytes :: non_neg_integer(),
+
+ persistent_count :: non_neg_integer(),
+ persistent_bytes :: non_neg_integer(),
+
+ target_ram_count :: non_neg_integer() | 'infinity',
+ ram_msg_count :: non_neg_integer(),
+ ram_msg_count_prev :: non_neg_integer(),
+ ram_ack_count_prev :: non_neg_integer(),
+ ram_bytes :: non_neg_integer(),
+ out_counter :: non_neg_integer(),
+ in_counter :: non_neg_integer(),
+ rates :: rates(),
+ msgs_on_disk :: gb_sets:set(),
+ msg_indices_on_disk :: gb_sets:set(),
+ unconfirmed :: gb_sets:set(),
+ confirmed :: gb_sets:set(),
+ ack_out_counter :: non_neg_integer(),
+ ack_in_counter :: non_neg_integer(),
+ disk_read_count :: non_neg_integer(),
+ disk_write_count :: non_neg_integer(),
+
+ io_batch_size :: pos_integer(),
+ mode :: 'default' | 'lazy',
+ memory_reduction_run_count :: non_neg_integer()}.
+
+-define(BLANK_DELTA, #delta { start_seq_id = undefined,
+ count = 0,
+ transient = 0,
+ end_seq_id = undefined }).
+-define(BLANK_DELTA_PATTERN(Z), #delta { start_seq_id = Z,
+ count = 0,
+ transient = 0,
+ end_seq_id = Z }).
+
+-define(MICROS_PER_SECOND, 1000000.0).
+
+%% We're sampling every 5s for RAM duration; a half life that is of
+%% the same order of magnitude is probably about right.
+-define(RATE_AVG_HALF_LIFE, 5.0).
+
+%% We will recalculate the #rates{} every time we get asked for our
+%% RAM duration, or every N messages published, whichever is
+%% sooner. We do this since the priority calculations in
+%% rabbit_amqqueue_process need fairly fresh rates.
+-define(MSGS_PER_RATE_CALC, 100).
+
+%% We define the garbage collector threshold used to tune how often the
+%% `reduce_memory_use` calls trigger an explicit garbage collection.
+%% See: rabbitmq-server-973 and rabbitmq-server-964.
+-define(DEFAULT_EXPLICIT_GC_RUN_OP_THRESHOLD, 1000).
+-define(EXPLICIT_GC_RUN_OP_THRESHOLD(Mode),
+ case get(explicit_gc_run_operation_threshold) of
+ undefined ->
+ Val = explicit_gc_run_operation_threshold_for_mode(Mode),
+ put(explicit_gc_run_operation_threshold, Val),
+ Val;
+ Val -> Val
+ end).
+
+explicit_gc_run_operation_threshold_for_mode(Mode) ->
+ {Key, Fallback} = case Mode of
+ lazy -> {lazy_queue_explicit_gc_run_operation_threshold,
+ ?DEFAULT_EXPLICIT_GC_RUN_OP_THRESHOLD};
+ _ -> {queue_explicit_gc_run_operation_threshold,
+ ?DEFAULT_EXPLICIT_GC_RUN_OP_THRESHOLD}
+ end,
+ rabbit_misc:get_env(rabbit, Key, Fallback).
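+%% Since the threshold is looked up with rabbit_misc:get_env(rabbit, Key,
+%% Fallback), it can be tuned through the 'rabbit' application environment.
+%% A sketch with illustrative values only, e.g. in advanced.config:
+%%
+%%     [{rabbit, [{queue_explicit_gc_run_operation_threshold,      2000},
+%%                {lazy_queue_explicit_gc_run_operation_threshold, 2000}]}].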
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+start(VHost, DurableQueues) ->
+ {AllTerms, StartFunState} = rabbit_queue_index:start(VHost, DurableQueues),
+ %% Group recovery terms by vhost.
+ ClientRefs = [Ref || Terms <- AllTerms,
+ Terms /= non_clean_shutdown,
+ begin
+ Ref = proplists:get_value(persistent_ref, Terms),
+ Ref =/= undefined
+ end],
+ start_msg_store(VHost, ClientRefs, StartFunState),
+ {ok, AllTerms}.
+
+stop(VHost) ->
+ ok = stop_msg_store(VHost),
+ ok = rabbit_queue_index:stop(VHost).
+
+start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefined ->
+ rabbit_log:info("Starting message stores for vhost '~s'~n", [VHost]),
+ do_start_msg_store(VHost, ?TRANSIENT_MSG_STORE, undefined, ?EMPTY_START_FUN_STATE),
+ do_start_msg_store(VHost, ?PERSISTENT_MSG_STORE, Refs, StartFunState),
+ ok.
+
+do_start_msg_store(VHost, Type, Refs, StartFunState) ->
+ case rabbit_vhost_msg_store:start(VHost, Type, Refs, StartFunState) of
+ {ok, _} ->
+ rabbit_log:info("Started message store of type ~s for vhost '~s'~n", [abbreviated_type(Type), VHost]);
+ {error, {no_such_vhost, VHost}} = Err ->
+ rabbit_log:error("Failed to start message store of type ~s for vhost '~s': the vhost no longer exists!~n",
+ [Type, VHost]),
+ exit(Err);
+ {error, Error} ->
+ rabbit_log:error("Failed to start message store of type ~s for vhost '~s': ~p~n",
+ [Type, VHost, Error]),
+ exit({error, Error})
+ end.
+
+abbreviated_type(?TRANSIENT_MSG_STORE) -> transient;
+abbreviated_type(?PERSISTENT_MSG_STORE) -> persistent.
+
+stop_msg_store(VHost) ->
+ rabbit_vhost_msg_store:stop(VHost, ?TRANSIENT_MSG_STORE),
+ rabbit_vhost_msg_store:stop(VHost, ?PERSISTENT_MSG_STORE),
+ ok.
+
+init(Queue, Recover, Callback) ->
+ init(
+ Queue, Recover, Callback,
+ fun (MsgIds, ActionTaken) ->
+ msgs_written_to_disk(Callback, MsgIds, ActionTaken)
+ end,
+ fun (MsgIds) -> msg_indices_written_to_disk(Callback, MsgIds) end,
+ fun (MsgIds) -> msgs_and_indices_written_to_disk(Callback, MsgIds) end).
+
+init(Q, new, AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) when ?is_amqqueue(Q) ->
+ QueueName = amqqueue:get_name(Q),
+ IsDurable = amqqueue:is_durable(Q),
+ IndexState = rabbit_queue_index:init(QueueName,
+ MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+ VHost = QueueName#resource.virtual_host,
+ init(IsDurable, IndexState, 0, 0, [],
+ case IsDurable of
+ true -> msg_store_client_init(?PERSISTENT_MSG_STORE,
+ MsgOnDiskFun, AsyncCallback, VHost);
+ false -> undefined
+ end,
+ msg_store_client_init(?TRANSIENT_MSG_STORE, undefined,
+ AsyncCallback, VHost), VHost);
+
+%% We can be recovering a transient queue if it crashed
+init(Q, Terms, AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) when ?is_amqqueue(Q) ->
+ QueueName = amqqueue:get_name(Q),
+ IsDurable = amqqueue:is_durable(Q),
+ {PRef, RecoveryTerms} = process_recovery_terms(Terms),
+ VHost = QueueName#resource.virtual_host,
+ {PersistentClient, ContainsCheckFun} =
+ case IsDurable of
+ true -> C = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef,
+ MsgOnDiskFun, AsyncCallback,
+ VHost),
+ {C, fun (MsgId) when is_binary(MsgId) ->
+ rabbit_msg_store:contains(MsgId, C);
+ (#basic_message{is_persistent = Persistent}) ->
+ Persistent
+ end};
+ false -> {undefined, fun(_MsgId) -> false end}
+ end,
+ TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE,
+ undefined, AsyncCallback,
+ VHost),
+ {DeltaCount, DeltaBytes, IndexState} =
+ rabbit_queue_index:recover(
+ QueueName, RecoveryTerms,
+ rabbit_vhost_msg_store:successfully_recovered_state(
+ VHost,
+ ?PERSISTENT_MSG_STORE),
+ ContainsCheckFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+ init(IsDurable, IndexState, DeltaCount, DeltaBytes, RecoveryTerms,
+ PersistentClient, TransientClient, VHost).
+
+process_recovery_terms(Terms=non_clean_shutdown) ->
+ {rabbit_guid:gen(), Terms};
+process_recovery_terms(Terms) ->
+ case proplists:get_value(persistent_ref, Terms) of
+ undefined -> {rabbit_guid:gen(), []};
+ PRef -> {PRef, Terms}
+ end.
+
+terminate(_Reason, State) ->
+ State1 = #vqstate { virtual_host = VHost,
+ persistent_count = PCount,
+ persistent_bytes = PBytes,
+ index_state = IndexState,
+ msg_store_clients = {MSCStateP, MSCStateT} } =
+ purge_pending_ack(true, State),
+ PRef = case MSCStateP of
+ undefined -> undefined;
+ _ -> ok = maybe_client_terminate(MSCStateP),
+ rabbit_msg_store:client_ref(MSCStateP)
+ end,
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCStateT),
+ Terms = [{persistent_ref, PRef},
+ {persistent_count, PCount},
+ {persistent_bytes, PBytes}],
+ a(State1#vqstate {
+ index_state = rabbit_queue_index:terminate(VHost, Terms, IndexState),
+ msg_store_clients = undefined }).
+
+%% The only difference between purge and delete is that delete also
+%% needs to delete everything that has been delivered and not ack'd.
+delete_and_terminate(_Reason, State) ->
+ %% Normally when we purge messages we interact with the qi by
+ %% issuing delivers and acks for every purged message. In this case
+ %% we don't need to do that, so we just delete the qi.
+ State1 = purge_and_index_reset(State),
+ State2 = #vqstate { msg_store_clients = {MSCStateP, MSCStateT} } =
+ purge_pending_ack_delete_and_terminate(State1),
+ case MSCStateP of
+ undefined -> ok;
+ _ -> rabbit_msg_store:client_delete_and_terminate(MSCStateP)
+ end,
+ rabbit_msg_store:client_delete_and_terminate(MSCStateT),
+ a(State2 #vqstate { msg_store_clients = undefined }).
+
+delete_crashed(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ ok = rabbit_queue_index:erase(QName).
+
+purge(State = #vqstate { len = Len }) ->
+ case is_pending_ack_empty(State) and is_unconfirmed_empty(State) of
+ true ->
+ {Len, purge_and_index_reset(State)};
+ false ->
+ {Len, purge_when_pending_acks(State)}
+ end.
+
+purge_acks(State) -> a(purge_pending_ack(false, State)).
+
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) ->
+ State1 =
+ publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
+ fun maybe_write_to_disk/4,
+ State),
+ a(maybe_reduce_memory_use(maybe_update_rates(State1))).
+
+batch_publish(Publishes, ChPid, Flow, State) ->
+ {ChPid, Flow, State1} =
+ lists:foldl(fun batch_publish1/2, {ChPid, Flow, State}, Publishes),
+ State2 = ui(State1),
+ a(maybe_reduce_memory_use(maybe_update_rates(State2))).
+
+publish_delivered(Msg, MsgProps, ChPid, Flow, State) ->
+ {SeqId, State1} =
+ publish_delivered1(Msg, MsgProps, ChPid, Flow,
+ fun maybe_write_to_disk/4,
+ State),
+ {SeqId, a(maybe_reduce_memory_use(maybe_update_rates(State1)))}.
+
+batch_publish_delivered(Publishes, ChPid, Flow, State) ->
+ {ChPid, Flow, SeqIds, State1} =
+ lists:foldl(fun batch_publish_delivered1/2,
+ {ChPid, Flow, [], State}, Publishes),
+ State2 = ui(State1),
+ {lists:reverse(SeqIds), a(maybe_reduce_memory_use(maybe_update_rates(State2)))}.
+
+discard(_MsgId, _ChPid, _Flow, State) -> State.
+
+drain_confirmed(State = #vqstate { confirmed = C }) ->
+ case gb_sets:is_empty(C) of
+ true -> {[], State}; %% common case
+ false -> {gb_sets:to_list(C), State #vqstate {
+ confirmed = gb_sets:new() }}
+ end.
+
+dropwhile(Pred, State) ->
+ {MsgProps, State1} =
+ remove_by_predicate(Pred, State),
+ {MsgProps, a(State1)}.
+
+fetchwhile(Pred, Fun, Acc, State) ->
+ {MsgProps, Acc1, State1} =
+ fetch_by_predicate(Pred, Fun, Acc, State),
+ {MsgProps, Acc1, a(State1)}.
+
+fetch(AckRequired, State) ->
+ case queue_out(State) of
+ {empty, State1} ->
+ {empty, a(State1)};
+ {{value, MsgStatus}, State1} ->
+ %% It is possible that the message has not yet been read from
+ %% disk at this point, so read it in.
+ {Msg, State2} = read_msg(MsgStatus, State1),
+ {AckTag, State3} = remove(AckRequired, MsgStatus, State2),
+ {{Msg, MsgStatus#msg_status.is_delivered, AckTag}, a(State3)}
+ end.
+
+drop(AckRequired, State) ->
+ case queue_out(State) of
+ {empty, State1} ->
+ {empty, a(State1)};
+ {{value, MsgStatus}, State1} ->
+ {AckTag, State2} = remove(AckRequired, MsgStatus, State1),
+ {{MsgStatus#msg_status.msg_id, AckTag}, a(State2)}
+ end.
+
+%% Duplicated from rabbit_backing_queue
+-spec ack([ack()], state()) -> {[rabbit_guid:guid()], state()}.
+
+ack([], State) ->
+ {[], State};
+%% optimisation: this head is essentially a partial evaluation of the
+%% general case below, for the single-ack case.
+ack([SeqId], State) ->
+ case remove_pending_ack(true, SeqId, State) of
+ {none, _} ->
+ {[], State};
+ {#msg_status { msg_id = MsgId,
+ is_persistent = IsPersistent,
+ msg_in_store = MsgInStore,
+ index_on_disk = IndexOnDisk },
+ State1 = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState,
+ ack_out_counter = AckOutCount }} ->
+ IndexState1 = case IndexOnDisk of
+ true -> rabbit_queue_index:ack([SeqId], IndexState);
+ false -> IndexState
+ end,
+ case MsgInStore of
+ true -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
+ false -> ok
+ end,
+ {[MsgId],
+ a(State1 #vqstate { index_state = IndexState1,
+ ack_out_counter = AckOutCount + 1 })}
+ end;
+ack(AckTags, State) ->
+ {{IndexOnDiskSeqIds, MsgIdsByStore, AllMsgIds},
+ State1 = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState,
+ ack_out_counter = AckOutCount }} =
+ lists:foldl(
+ fun (SeqId, {Acc, State2}) ->
+ case remove_pending_ack(true, SeqId, State2) of
+ {none, _} ->
+ {Acc, State2};
+ {MsgStatus, State3} ->
+ {accumulate_ack(MsgStatus, Acc), State3}
+ end
+ end, {accumulate_ack_init(), State}, AckTags),
+ IndexState1 = rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
+ remove_msgs_by_id(MsgIdsByStore, MSCState),
+ {lists:reverse(AllMsgIds),
+ a(State1 #vqstate { index_state = IndexState1,
+ ack_out_counter = AckOutCount + length(AckTags) })}.
+
+requeue(AckTags, #vqstate { mode = default,
+ delta = Delta,
+ q3 = Q3,
+ q4 = Q4,
+ in_counter = InCounter,
+ len = Len } = State) ->
+ {SeqIds, Q4a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q4, [],
+ beta_limit(Q3),
+ fun publish_alpha/2, State),
+ {SeqIds1, Q3a, MsgIds1, State2} = queue_merge(SeqIds, Q3, MsgIds,
+ delta_limit(Delta),
+ fun publish_beta/2, State1),
+ {Delta1, MsgIds2, State3} = delta_merge(SeqIds1, Delta, MsgIds1,
+ State2),
+ MsgCount = length(MsgIds2),
+ {MsgIds2, a(maybe_reduce_memory_use(
+ maybe_update_rates(ui(
+ State3 #vqstate { delta = Delta1,
+ q3 = Q3a,
+ q4 = Q4a,
+ in_counter = InCounter + MsgCount,
+ len = Len + MsgCount }))))};
+requeue(AckTags, #vqstate { mode = lazy,
+ delta = Delta,
+ q3 = Q3,
+ in_counter = InCounter,
+ len = Len } = State) ->
+ {SeqIds, Q3a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q3, [],
+ delta_limit(Delta),
+ fun publish_beta/2, State),
+ {Delta1, MsgIds1, State2} = delta_merge(SeqIds, Delta, MsgIds,
+ State1),
+ MsgCount = length(MsgIds1),
+ {MsgIds1, a(maybe_reduce_memory_use(
+ maybe_update_rates(ui(
+ State2 #vqstate { delta = Delta1,
+ q3 = Q3a,
+ in_counter = InCounter + MsgCount,
+ len = Len + MsgCount }))))}.
+
+ackfold(MsgFun, Acc, State, AckTags) ->
+ {AccN, StateN} =
+ lists:foldl(fun(SeqId, {Acc0, State0}) ->
+ MsgStatus = lookup_pending_ack(SeqId, State0),
+ {Msg, State1} = read_msg(MsgStatus, State0),
+ {MsgFun(Msg, SeqId, Acc0), State1}
+ end, {Acc, State}, AckTags),
+ {AccN, a(StateN)}.
+
+fold(Fun, Acc, State = #vqstate{index_state = IndexState}) ->
+ {Its, IndexState1} = lists:foldl(fun inext/2, {[], IndexState},
+ [msg_iterator(State),
+ disk_ack_iterator(State),
+ ram_ack_iterator(State),
+ qi_ack_iterator(State)]),
+ ifold(Fun, Acc, Its, State#vqstate{index_state = IndexState1}).
+
+len(#vqstate { len = Len }) -> Len.
+
+is_empty(State) -> 0 == len(State).
+
+depth(State) ->
+ len(State) + count_pending_acks(State).
+
+set_ram_duration_target(
+ DurationTarget, State = #vqstate {
+ rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate,
+ ack_in = AvgAckIngressRate,
+ ack_out = AvgAckEgressRate },
+ target_ram_count = TargetRamCount }) ->
+ Rate =
+ AvgEgressRate + AvgIngressRate + AvgAckEgressRate + AvgAckIngressRate,
+ TargetRamCount1 =
+ case DurationTarget of
+ infinity -> infinity;
+ _ -> trunc(DurationTarget * Rate) %% msgs = sec * msgs/sec
+ end,
+ State1 = State #vqstate { target_ram_count = TargetRamCount1 },
+ a(case TargetRamCount1 == infinity orelse
+ (TargetRamCount =/= infinity andalso
+ TargetRamCount1 >= TargetRamCount) of
+ true -> State1;
+ false -> reduce_memory_use(State1)
+ end).
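+%%
+%% For example (illustrative numbers): in set_ram_duration_target/2 above, a
+%% DurationTarget of 2 seconds with a combined rate of 150 msgs/sec yields a
+%% target_ram_count of trunc(2 * 150) = 300 messages.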
+
+maybe_update_rates(State = #vqstate{ in_counter = InCount,
+ out_counter = OutCount })
+ when InCount + OutCount > ?MSGS_PER_RATE_CALC ->
+ update_rates(State);
+maybe_update_rates(State) ->
+ State.
+
+update_rates(State = #vqstate{ in_counter = InCount,
+ out_counter = OutCount,
+ ack_in_counter = AckInCount,
+ ack_out_counter = AckOutCount,
+ rates = #rates{ in = InRate,
+ out = OutRate,
+ ack_in = AckInRate,
+ ack_out = AckOutRate,
+ timestamp = TS }}) ->
+ Now = erlang:monotonic_time(),
+
+ Rates = #rates { in = update_rate(Now, TS, InCount, InRate),
+ out = update_rate(Now, TS, OutCount, OutRate),
+ ack_in = update_rate(Now, TS, AckInCount, AckInRate),
+ ack_out = update_rate(Now, TS, AckOutCount, AckOutRate),
+ timestamp = Now },
+
+ State#vqstate{ in_counter = 0,
+ out_counter = 0,
+ ack_in_counter = 0,
+ ack_out_counter = 0,
+ rates = Rates }.
+
+update_rate(Now, TS, Count, Rate) ->
+ Time = erlang:convert_time_unit(Now - TS, native, micro_seconds) /
+ ?MICROS_PER_SECOND,
+ if
+ Time == 0 -> Rate;
+ true -> rabbit_misc:moving_average(Time, ?RATE_AVG_HALF_LIFE,
+ Count / Time, Rate)
+ end.
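+%%
+%% For example (illustrative numbers): in update_rate/4 above, 200 messages
+%% counted since a sample taken 2 seconds earlier give an instantaneous rate
+%% of 100 msgs/sec, which moving_average/4 then blends with the previous
+%% rate using an exponential moving average with half-life
+%% ?RATE_AVG_HALF_LIFE.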
+
+ram_duration(State) ->
+ State1 = #vqstate { rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate,
+ ack_in = AvgAckIngressRate,
+ ack_out = AvgAckEgressRate },
+ ram_msg_count = RamMsgCount,
+ ram_msg_count_prev = RamMsgCountPrev,
+ ram_pending_ack = RPA,
+ qi_pending_ack = QPA,
+ ram_ack_count_prev = RamAckCountPrev } =
+ update_rates(State),
+
+ RamAckCount = gb_trees:size(RPA) + gb_trees:size(QPA),
+
+ Duration = %% msgs+acks / (msgs+acks/sec) == sec
+ case lists:all(fun (X) -> X < 0.01 end,
+ [AvgEgressRate, AvgIngressRate,
+ AvgAckEgressRate, AvgAckIngressRate]) of
+ true -> infinity;
+ false -> (RamMsgCountPrev + RamMsgCount +
+ RamAckCount + RamAckCountPrev) /
+ (4 * (AvgEgressRate + AvgIngressRate +
+ AvgAckEgressRate + AvgAckIngressRate))
+ end,
+
+ {Duration, State1}.
+
+needs_timeout(#vqstate { index_state = IndexState }) ->
+ case rabbit_queue_index:needs_sync(IndexState) of
+ confirms -> timed;
+ other -> idle;
+ false -> false
+ end.
+
+timeout(State = #vqstate { index_state = IndexState }) ->
+ State #vqstate { index_state = rabbit_queue_index:sync(IndexState) }.
+
+handle_pre_hibernate(State = #vqstate { index_state = IndexState }) ->
+ State #vqstate { index_state = rabbit_queue_index:flush(IndexState) }.
+
+handle_info(bump_reduce_memory_use, State = #vqstate{ waiting_bump = true }) ->
+ State#vqstate{ waiting_bump = false };
+handle_info(bump_reduce_memory_use, State) ->
+ State.
+
+resume(State) -> a(reduce_memory_use(State)).
+
+msg_rates(#vqstate { rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate } }) ->
+ {AvgIngressRate, AvgEgressRate}.
+
+info(messages_ready_ram, #vqstate{ram_msg_count = RamMsgCount}) ->
+ RamMsgCount;
+info(messages_unacknowledged_ram, #vqstate{ram_pending_ack = RPA,
+ qi_pending_ack = QPA}) ->
+ gb_trees:size(RPA) + gb_trees:size(QPA);
+info(messages_ram, State) ->
+ info(messages_ready_ram, State) + info(messages_unacknowledged_ram, State);
+info(messages_persistent, #vqstate{persistent_count = PersistentCount}) ->
+ PersistentCount;
+info(messages_paged_out, #vqstate{delta = #delta{transient = Count}}) ->
+ Count;
+info(message_bytes, #vqstate{bytes = Bytes,
+ unacked_bytes = UBytes}) ->
+ Bytes + UBytes;
+info(message_bytes_ready, #vqstate{bytes = Bytes}) ->
+ Bytes;
+info(message_bytes_unacknowledged, #vqstate{unacked_bytes = UBytes}) ->
+ UBytes;
+info(message_bytes_ram, #vqstate{ram_bytes = RamBytes}) ->
+ RamBytes;
+info(message_bytes_persistent, #vqstate{persistent_bytes = PersistentBytes}) ->
+ PersistentBytes;
+info(message_bytes_paged_out, #vqstate{delta_transient_bytes = PagedOutBytes}) ->
+ PagedOutBytes;
+info(head_message_timestamp, #vqstate{
+ q3 = Q3,
+ q4 = Q4,
+ ram_pending_ack = RPA,
+ qi_pending_ack = QPA}) ->
+ head_message_timestamp(Q3, Q4, RPA, QPA);
+info(disk_reads, #vqstate{disk_read_count = Count}) ->
+ Count;
+info(disk_writes, #vqstate{disk_write_count = Count}) ->
+ Count;
+info(backing_queue_status, #vqstate {
+ q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ mode = Mode,
+ len = Len,
+ target_ram_count = TargetRamCount,
+ next_seq_id = NextSeqId,
+ rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate,
+ ack_in = AvgAckIngressRate,
+ ack_out = AvgAckEgressRate }}) ->
+
+ [ {mode , Mode},
+ {q1 , ?QUEUE:len(Q1)},
+ {q2 , ?QUEUE:len(Q2)},
+ {delta , Delta},
+ {q3 , ?QUEUE:len(Q3)},
+ {q4 , ?QUEUE:len(Q4)},
+ {len , Len},
+ {target_ram_count , TargetRamCount},
+ {next_seq_id , NextSeqId},
+ {avg_ingress_rate , AvgIngressRate},
+ {avg_egress_rate , AvgEgressRate},
+ {avg_ack_ingress_rate, AvgAckIngressRate},
+ {avg_ack_egress_rate , AvgAckEgressRate} ];
+info(_, _) ->
+ ''.
+
+invoke(?MODULE, Fun, State) -> Fun(?MODULE, State);
+invoke( _, _, State) -> State.
+
+is_duplicate(_Msg, State) -> {false, State}.
+
+set_queue_mode(Mode, State = #vqstate { mode = Mode }) ->
+ State;
+set_queue_mode(lazy, State = #vqstate {
+ target_ram_count = TargetRamCount }) ->
+ %% To become a lazy queue we need to page everything to disk first.
+ State1 = convert_to_lazy(State),
+ %% restore the original target_ram_count
+ a(State1 #vqstate { mode = lazy, target_ram_count = TargetRamCount });
+set_queue_mode(default, State) ->
+ %% becoming a default queue means loading messages from disk like
+ %% when a queue is recovered.
+ a(maybe_deltas_to_betas(State #vqstate { mode = default }));
+set_queue_mode(_, State) ->
+ State.
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, _State) ->
+ lists:foldl(fun ({{#basic_message{ id = Id }, _Props}, AckTag}, Acc) ->
+ [{Id, AckTag} | Acc]
+ end, Accumulator, lists:zip(Msgs, AckTags)).
+
+convert_to_lazy(State) ->
+ State1 = #vqstate { delta = Delta, q3 = Q3, len = Len } =
+ set_ram_duration_target(0, State),
+ case Delta#delta.count + ?QUEUE:len(Q3) == Len of
+ true ->
+ State1;
+ false ->
+ %% When pushing messages to disk, we might have been
+ %% blocked by the msg_store, so we need to see if we have
+ %% to wait for more credit, and then keep paging messages.
+ %%
+ %% The amqqueue_process could have taken care of this, but
+ %% between the time it receives the bump_credit msg and
+ %% calls BQ:resume to keep paging messages to disk, some
+ %% other request may arrive to the BQ which at this moment
+ %% is not in a proper state for a lazy BQ (unless all
+ %% messages have been paged to disk already).
+ wait_for_msg_store_credit(),
+ convert_to_lazy(resume(State1))
+ end.
+
+wait_for_msg_store_credit() ->
+ case credit_flow:blocked() of
+ true -> receive
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg)
+ end;
+ false -> ok
+ end.
+
+%% Get the Timestamp property of the first msg, if present. This is
+%% the one with the oldest timestamp among the heads of the pending
+%% acks and unread queues. We can't check disk_pending_acks as these
+%% are paged out - we assume some will soon be paged in rather than
+%% forcing it to happen. Pending ack msgs are included as they are
+%% regarded as unprocessed until acked; this also prevents the result
+%% apparently oscillating during repeated rejects. Q3 is only checked
+%% when Q4 is empty as any Q4 msg will be earlier.
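+%%
+%% For example (illustrative values): if the head of Q4 carries timestamp
+%% 1500000000 and the oldest pending ack carries 1499999990, then
+%% 1499999990 is returned; if none of the heads carry a timestamp, '' is
+%% returned.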
+head_message_timestamp(Q3, Q4, RPA, QPA) ->
+ HeadMsgs = [ HeadMsgStatus#msg_status.msg ||
+ HeadMsgStatus <-
+ [ get_qs_head([Q4, Q3]),
+ get_pa_head(RPA),
+ get_pa_head(QPA) ],
+ HeadMsgStatus /= undefined,
+ HeadMsgStatus#msg_status.msg /= undefined ],
+
+ Timestamps =
+ [Timestamp || HeadMsg <- HeadMsgs,
+ Timestamp <- [rabbit_basic:extract_timestamp(
+ HeadMsg#basic_message.content)],
+ Timestamp /= undefined
+ ],
+
+ case Timestamps == [] of
+ true -> '';
+ false -> lists:min(Timestamps)
+ end.
+
+get_qs_head(Qs) ->
+ catch lists:foldl(
+ fun (Q, Acc) ->
+ case get_q_head(Q) of
+ undefined -> Acc;
+ Val -> throw(Val)
+ end
+ end, undefined, Qs).
+
+get_q_head(Q) ->
+ get_collection_head(Q, fun ?QUEUE:is_empty/1, fun ?QUEUE:peek/1).
+
+get_pa_head(PA) ->
+ get_collection_head(PA, fun gb_trees:is_empty/1, fun gb_trees:smallest/1).
+
+get_collection_head(Col, IsEmpty, GetVal) ->
+ case IsEmpty(Col) of
+ false ->
+ {_, MsgStatus} = GetVal(Col),
+ MsgStatus;
+ true -> undefined
+ end.
+
+%%----------------------------------------------------------------------------
+%% Minor helpers
+%%----------------------------------------------------------------------------
+a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ mode = default,
+ len = Len,
+ bytes = Bytes,
+ unacked_bytes = UnackedBytes,
+ persistent_count = PersistentCount,
+ persistent_bytes = PersistentBytes,
+ ram_msg_count = RamMsgCount,
+ ram_bytes = RamBytes}) ->
+ E1 = ?QUEUE:is_empty(Q1),
+ E2 = ?QUEUE:is_empty(Q2),
+ ED = Delta#delta.count == 0,
+ E3 = ?QUEUE:is_empty(Q3),
+ E4 = ?QUEUE:is_empty(Q4),
+ LZ = Len == 0,
+
+ %% if q1 has messages then q3 cannot be empty. See publish/6.
+ true = E1 or not E3,
+ %% if q2 has messages then we have messages in delta (paged to
+ %% disk). See push_alphas_to_betas/2.
+ true = E2 or not ED,
+ %% if delta has messages then q3 cannot be empty. This is enforced
+ %% by paging, where min([?SEGMENT_ENTRY_COUNT, len(q3)]) messages
+    %% are always kept in RAM.
+ true = ED or not E3,
+ %% if the queue length is 0, then q3 and q4 must be empty.
+ true = LZ == (E3 and E4),
+
+ true = Len >= 0,
+ true = Bytes >= 0,
+ true = UnackedBytes >= 0,
+ true = PersistentCount >= 0,
+ true = PersistentBytes >= 0,
+ true = RamMsgCount >= 0,
+ true = RamMsgCount =< Len,
+ true = RamBytes >= 0,
+ true = RamBytes =< Bytes + UnackedBytes,
+
+ State;
+a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ mode = lazy,
+ len = Len,
+ bytes = Bytes,
+ unacked_bytes = UnackedBytes,
+ persistent_count = PersistentCount,
+ persistent_bytes = PersistentBytes,
+ ram_msg_count = RamMsgCount,
+ ram_bytes = RamBytes}) ->
+ E1 = ?QUEUE:is_empty(Q1),
+ E2 = ?QUEUE:is_empty(Q2),
+ ED = Delta#delta.count == 0,
+ E3 = ?QUEUE:is_empty(Q3),
+ E4 = ?QUEUE:is_empty(Q4),
+ LZ = Len == 0,
+ L3 = ?QUEUE:len(Q3),
+
+ %% q1 must always be empty, since q1 only gets messages during
+ %% publish, but for lazy queues messages go straight to delta.
+ true = E1,
+
+ %% q2 only gets messages from q1 when push_alphas_to_betas is
+ %% called for a non empty delta, which won't be the case for a
+ %% lazy queue. This means q2 must always be empty.
+ true = E2,
+
+    %% q4 must always be empty, since q4 only gets messages during
+    %% publish, but for lazy queues messages go straight to delta.
+ true = E4,
+
+ %% if the queue is empty, then delta is empty and q3 is empty.
+ true = LZ == (ED and E3),
+
+ %% There should be no messages in q1, q2, and q4
+ true = Delta#delta.count + L3 == Len,
+
+ true = Len >= 0,
+ true = Bytes >= 0,
+ true = UnackedBytes >= 0,
+ true = PersistentCount >= 0,
+ true = PersistentBytes >= 0,
+ true = RamMsgCount >= 0,
+ true = RamMsgCount =< Len,
+ true = RamBytes >= 0,
+ true = RamBytes =< Bytes + UnackedBytes,
+
+ State.
+
+d(Delta = #delta { start_seq_id = Start, count = Count, end_seq_id = End })
+ when Start + Count =< End ->
+ Delta.
+
+m(MsgStatus = #msg_status { is_persistent = IsPersistent,
+ msg_in_store = MsgInStore,
+ index_on_disk = IndexOnDisk }) ->
+ true = (not IsPersistent) or IndexOnDisk,
+ true = msg_in_ram(MsgStatus) or MsgInStore,
+ MsgStatus.
+
+one_if(true ) -> 1;
+one_if(false) -> 0.
+
+cons_if(true, E, L) -> [E | L];
+cons_if(false, _E, L) -> L.
+
+gb_sets_maybe_insert(false, _Val, Set) -> Set;
+gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set).
+
+msg_status(IsPersistent, IsDelivered, SeqId,
+ Msg = #basic_message {id = MsgId}, MsgProps, IndexMaxSize) ->
+ #msg_status{seq_id = SeqId,
+ msg_id = MsgId,
+ msg = Msg,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_in_store = false,
+ index_on_disk = false,
+ persist_to = determine_persist_to(Msg, MsgProps, IndexMaxSize),
+ msg_props = MsgProps}.
+
+beta_msg_status({Msg = #basic_message{id = MsgId},
+ SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+ MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered),
+ MS0#msg_status{msg_id = MsgId,
+ msg = Msg,
+ persist_to = queue_index,
+ msg_in_store = false};
+
+beta_msg_status({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+ MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered),
+ MS0#msg_status{msg_id = MsgId,
+ msg = undefined,
+ persist_to = msg_store,
+ msg_in_store = true}.
+
+beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered) ->
+ #msg_status{seq_id = SeqId,
+ msg = undefined,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ index_on_disk = true,
+ msg_props = MsgProps}.
+
+trim_msg_status(MsgStatus) ->
+ case persist_to(MsgStatus) of
+ msg_store -> MsgStatus#msg_status{msg = undefined};
+ queue_index -> MsgStatus
+ end.
+
+with_msg_store_state({MSCStateP, MSCStateT}, true, Fun) ->
+ {Result, MSCStateP1} = Fun(MSCStateP),
+ {Result, {MSCStateP1, MSCStateT}};
+with_msg_store_state({MSCStateP, MSCStateT}, false, Fun) ->
+ {Result, MSCStateT1} = Fun(MSCStateT),
+ {Result, {MSCStateP, MSCStateT1}}.
+
+with_immutable_msg_store_state(MSCState, IsPersistent, Fun) ->
+ {Res, MSCState} = with_msg_store_state(MSCState, IsPersistent,
+ fun (MSCState1) ->
+ {Fun(MSCState1), MSCState1}
+ end),
+ Res.
+
+msg_store_client_init(MsgStore, MsgOnDiskFun, Callback, VHost) ->
+ msg_store_client_init(MsgStore, rabbit_guid:gen(), MsgOnDiskFun,
+ Callback, VHost).
+
+msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback, VHost) ->
+ CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE),
+ rabbit_vhost_msg_store:client_init(VHost, MsgStore,
+ Ref, MsgOnDiskFun,
+ fun () ->
+ Callback(?MODULE, CloseFDsFun)
+ end).
+
+msg_store_write(MSCState, IsPersistent, MsgId, Msg) ->
+ with_immutable_msg_store_state(
+ MSCState, IsPersistent,
+ fun (MSCState1) ->
+ rabbit_msg_store:write_flow(MsgId, Msg, MSCState1)
+ end).
+
+msg_store_read(MSCState, IsPersistent, MsgId) ->
+ with_msg_store_state(
+ MSCState, IsPersistent,
+ fun (MSCState1) ->
+ rabbit_msg_store:read(MsgId, MSCState1)
+ end).
+
+msg_store_remove(MSCState, IsPersistent, MsgIds) ->
+ with_immutable_msg_store_state(
+ MSCState, IsPersistent,
+      fun (MSCState1) ->
+              rabbit_msg_store:remove(MsgIds, MSCState1)
+ end).
+
+msg_store_close_fds(MSCState, IsPersistent) ->
+ with_msg_store_state(
+ MSCState, IsPersistent,
+ fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end).
+
+msg_store_close_fds_fun(IsPersistent) ->
+ fun (?MODULE, State = #vqstate { msg_store_clients = MSCState }) ->
+ {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent),
+ State #vqstate { msg_store_clients = MSCState1 }
+ end.
+
+maybe_write_delivered(false, _SeqId, IndexState) ->
+ IndexState;
+maybe_write_delivered(true, SeqId, IndexState) ->
+ rabbit_queue_index:deliver([SeqId], IndexState).
+
+betas_from_index_entries(List, TransientThreshold, DelsAndAcksFun, State) ->
+ {Filtered, Delivers, Acks, RamReadyCount, RamBytes, TransientCount, TransientBytes} =
+ lists:foldr(
+ fun ({_MsgOrId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M,
+ {Filtered1, Delivers1, Acks1, RRC, RB, TC, TB} = Acc) ->
+ case SeqId < TransientThreshold andalso not IsPersistent of
+ true -> {Filtered1,
+ cons_if(not IsDelivered, SeqId, Delivers1),
+ [SeqId | Acks1], RRC, RB, TC, TB};
+ false -> MsgStatus = m(beta_msg_status(M)),
+ HaveMsg = msg_in_ram(MsgStatus),
+ Size = msg_size(MsgStatus),
+ case is_msg_in_pending_acks(SeqId, State) of
+ false -> {?QUEUE:in_r(MsgStatus, Filtered1),
+ Delivers1, Acks1,
+ RRC + one_if(HaveMsg),
+ RB + one_if(HaveMsg) * Size,
+ TC + one_if(not IsPersistent),
+ TB + one_if(not IsPersistent) * Size};
+ true -> Acc %% [0]
+ end
+ end
+ end, {?QUEUE:new(), [], [], 0, 0, 0, 0}, List),
+ {Filtered, RamReadyCount, RamBytes, DelsAndAcksFun(Delivers, Acks, State),
+ TransientCount, TransientBytes}.
+%% [0] We don't increase RamBytes here, even though it pertains to
+%% unacked messages too, since if HaveMsg then the message must have
+%% been stored in the QI, thus the message must have been in
+%% qi_pending_ack, thus it must already have been in RAM.
+
+is_msg_in_pending_acks(SeqId, #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA }) ->
+ (gb_trees:is_defined(SeqId, RPA) orelse
+ gb_trees:is_defined(SeqId, DPA) orelse
+ gb_trees:is_defined(SeqId, QPA)).
+
+expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X), IsPersistent) ->
+ d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1,
+ transient = one_if(not IsPersistent)});
+expand_delta(SeqId, #delta { start_seq_id = StartSeqId,
+ count = Count,
+ transient = Transient } = Delta,
+ IsPersistent )
+ when SeqId < StartSeqId ->
+ d(Delta #delta { start_seq_id = SeqId, count = Count + 1,
+ transient = Transient + one_if(not IsPersistent)});
+expand_delta(SeqId, #delta { count = Count,
+ end_seq_id = EndSeqId,
+ transient = Transient } = Delta,
+ IsPersistent)
+ when SeqId >= EndSeqId ->
+ d(Delta #delta { count = Count + 1, end_seq_id = SeqId + 1,
+ transient = Transient + one_if(not IsPersistent)});
+expand_delta(_SeqId, #delta { count = Count,
+ transient = Transient } = Delta,
+ IsPersistent ) ->
+ d(Delta #delta { count = Count + 1,
+ transient = Transient + one_if(not IsPersistent) }).
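+%%
+%% For example, expanding ?BLANK_DELTA with SeqId = 5 yields
+%% #delta { start_seq_id = 5, count = 1, end_seq_id = 6 }, with transient = 1
+%% if the message is not persistent.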
+
+%%----------------------------------------------------------------------------
+%% Internal major helpers for Public API
+%%----------------------------------------------------------------------------
+
+init(IsDurable, IndexState, DeltaCount, DeltaBytes, Terms,
+ PersistentClient, TransientClient, VHost) ->
+ {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState),
+
+ {DeltaCount1, DeltaBytes1} =
+ case Terms of
+ non_clean_shutdown -> {DeltaCount, DeltaBytes};
+ _ -> {proplists:get_value(persistent_count,
+ Terms, DeltaCount),
+ proplists:get_value(persistent_bytes,
+ Terms, DeltaBytes)}
+ end,
+ Delta = case DeltaCount1 == 0 andalso DeltaCount /= undefined of
+ true -> ?BLANK_DELTA;
+ false -> d(#delta { start_seq_id = LowSeqId,
+ count = DeltaCount1,
+ transient = 0,
+ end_seq_id = NextSeqId })
+ end,
+ Now = erlang:monotonic_time(),
+ IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size,
+ ?IO_BATCH_SIZE),
+
+ {ok, IndexMaxSize} = application:get_env(
+ rabbit, queue_index_embed_msgs_below),
+ State = #vqstate {
+ q1 = ?QUEUE:new(),
+ q2 = ?QUEUE:new(),
+ delta = Delta,
+ q3 = ?QUEUE:new(),
+ q4 = ?QUEUE:new(),
+ next_seq_id = NextSeqId,
+ ram_pending_ack = gb_trees:empty(),
+ disk_pending_ack = gb_trees:empty(),
+ qi_pending_ack = gb_trees:empty(),
+ index_state = IndexState1,
+ msg_store_clients = {PersistentClient, TransientClient},
+ durable = IsDurable,
+ transient_threshold = NextSeqId,
+ qi_embed_msgs_below = IndexMaxSize,
+
+ len = DeltaCount1,
+ persistent_count = DeltaCount1,
+ bytes = DeltaBytes1,
+ persistent_bytes = DeltaBytes1,
+ delta_transient_bytes = 0,
+
+ target_ram_count = infinity,
+ ram_msg_count = 0,
+ ram_msg_count_prev = 0,
+ ram_ack_count_prev = 0,
+ ram_bytes = 0,
+ unacked_bytes = 0,
+ out_counter = 0,
+ in_counter = 0,
+ rates = blank_rates(Now),
+ msgs_on_disk = gb_sets:new(),
+ msg_indices_on_disk = gb_sets:new(),
+ unconfirmed = gb_sets:new(),
+ confirmed = gb_sets:new(),
+ ack_out_counter = 0,
+ ack_in_counter = 0,
+ disk_read_count = 0,
+ disk_write_count = 0,
+
+ io_batch_size = IoBatchSize,
+
+ mode = default,
+ memory_reduction_run_count = 0,
+ virtual_host = VHost},
+ a(maybe_deltas_to_betas(State)).
+
+blank_rates(Now) ->
+ #rates { in = 0.0,
+ out = 0.0,
+ ack_in = 0.0,
+ ack_out = 0.0,
+ timestamp = Now}.
+
+in_r(MsgStatus = #msg_status { msg = undefined },
+ State = #vqstate { mode = default, q3 = Q3, q4 = Q4 }) ->
+ case ?QUEUE:is_empty(Q4) of
+ true -> State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) };
+ false -> {Msg, State1 = #vqstate { q4 = Q4a }} =
+ read_msg(MsgStatus, State),
+ MsgStatus1 = MsgStatus#msg_status{msg = Msg},
+ stats(ready0, {MsgStatus, MsgStatus1}, 0,
+ State1 #vqstate { q4 = ?QUEUE:in_r(MsgStatus1, Q4a) })
+ end;
+in_r(MsgStatus,
+ State = #vqstate { mode = default, q4 = Q4 }) ->
+ State #vqstate { q4 = ?QUEUE:in_r(MsgStatus, Q4) };
+%% lazy queues
+in_r(MsgStatus = #msg_status { seq_id = SeqId, is_persistent = IsPersistent },
+ State = #vqstate { mode = lazy, q3 = Q3, delta = Delta}) ->
+ case ?QUEUE:is_empty(Q3) of
+ true ->
+ {_MsgStatus1, State1} =
+ maybe_write_to_disk(true, true, MsgStatus, State),
+ State2 = stats(ready0, {MsgStatus, none}, 1, State1),
+ Delta1 = expand_delta(SeqId, Delta, IsPersistent),
+ State2 #vqstate{ delta = Delta1};
+ false ->
+ State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) }
+ end.
+
+queue_out(State = #vqstate { mode = default, q4 = Q4 }) ->
+ case ?QUEUE:out(Q4) of
+ {empty, _Q4} ->
+ case fetch_from_q3(State) of
+ {empty, _State1} = Result -> Result;
+ {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1}
+ end;
+ {{value, MsgStatus}, Q4a} ->
+ {{value, MsgStatus}, State #vqstate { q4 = Q4a }}
+ end;
+%% lazy queues
+queue_out(State = #vqstate { mode = lazy }) ->
+ case fetch_from_q3(State) of
+ {empty, _State1} = Result -> Result;
+ {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1}
+ end.
+
+read_msg(#msg_status{msg = undefined,
+ msg_id = MsgId,
+ is_persistent = IsPersistent}, State) ->
+ read_msg(MsgId, IsPersistent, State);
+read_msg(#msg_status{msg = Msg}, State) ->
+ {Msg, State}.
+
+read_msg(MsgId, IsPersistent, State = #vqstate{msg_store_clients = MSCState,
+ disk_read_count = Count}) ->
+ {{ok, Msg = #basic_message {}}, MSCState1} =
+ msg_store_read(MSCState, IsPersistent, MsgId),
+ {Msg, State #vqstate {msg_store_clients = MSCState1,
+ disk_read_count = Count + 1}}.
+
+stats(Signs, Statuses, DeltaPaged, State) ->
+ stats0(expand_signs(Signs), expand_statuses(Statuses), DeltaPaged, State).
+
+expand_signs(ready0) -> {0, 0, true};
+expand_signs(lazy_pub) -> {1, 0, true};
+expand_signs({A, B}) -> {A, B, false}.
+
+expand_statuses({none, A}) -> {false, msg_in_ram(A), A};
+expand_statuses({B, none}) -> {msg_in_ram(B), false, B};
+expand_statuses({lazy, A}) -> {false , false, A};
+expand_statuses({B, A}) -> {msg_in_ram(B), msg_in_ram(A), B}.
+
+%% In this function at least, we are religious: the variable name
+%% contains "Ready" or "Unacked" iff that is what it counts. If
+%% neither is present it counts both.
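+%%
+%% For example, stats({-1, 1}, {MsgStatus, MsgStatus}, 0, State) records that
+%% one message left the ready set and entered the unacked set (as in
+%% remove/3), leaving the overall depth unchanged.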
+stats0({DeltaReady, DeltaUnacked, ReadyMsgPaged},
+ {InRamBefore, InRamAfter, MsgStatus}, DeltaPaged,
+ State = #vqstate{len = ReadyCount,
+ bytes = ReadyBytes,
+ ram_msg_count = RamReadyCount,
+ persistent_count = PersistentCount,
+ unacked_bytes = UnackedBytes,
+ ram_bytes = RamBytes,
+ delta_transient_bytes = DeltaBytes,
+ persistent_bytes = PersistentBytes}) ->
+ S = msg_size(MsgStatus),
+ DeltaTotal = DeltaReady + DeltaUnacked,
+ DeltaRam = case {InRamBefore, InRamAfter} of
+ {false, false} -> 0;
+ {false, true} -> 1;
+ {true, false} -> -1;
+ {true, true} -> 0
+ end,
+ DeltaRamReady = case DeltaReady of
+ 1 -> one_if(InRamAfter);
+ -1 -> -one_if(InRamBefore);
+ 0 when ReadyMsgPaged -> DeltaRam;
+ 0 -> 0
+ end,
+ DeltaPersistent = DeltaTotal * one_if(MsgStatus#msg_status.is_persistent),
+ State#vqstate{len = ReadyCount + DeltaReady,
+ ram_msg_count = RamReadyCount + DeltaRamReady,
+ persistent_count = PersistentCount + DeltaPersistent,
+ bytes = ReadyBytes + DeltaReady * S,
+ unacked_bytes = UnackedBytes + DeltaUnacked * S,
+ ram_bytes = RamBytes + DeltaRam * S,
+ persistent_bytes = PersistentBytes + DeltaPersistent * S,
+ delta_transient_bytes = DeltaBytes + DeltaPaged * one_if(not MsgStatus#msg_status.is_persistent) * S}.
+
+msg_size(#msg_status{msg_props = #message_properties{size = Size}}) -> Size.
+
+msg_in_ram(#msg_status{msg = Msg}) -> Msg =/= undefined.
+
+%% first param: AckRequired
+remove(true, MsgStatus = #msg_status {
+ seq_id = SeqId,
+ is_delivered = IsDelivered,
+ index_on_disk = IndexOnDisk },
+ State = #vqstate {out_counter = OutCount,
+ index_state = IndexState}) ->
+ %% Mark it delivered if necessary
+ IndexState1 = maybe_write_delivered(
+ IndexOnDisk andalso not IsDelivered,
+ SeqId, IndexState),
+
+ State1 = record_pending_ack(
+ MsgStatus #msg_status {
+ is_delivered = true }, State),
+
+ State2 = stats({-1, 1}, {MsgStatus, MsgStatus}, 0, State1),
+
+ {SeqId, maybe_update_rates(
+ State2 #vqstate {out_counter = OutCount + 1,
+ index_state = IndexState1})};
+
+%% This function body has the same behaviour as remove_queue_entries/3
+%% but instead of removing messages based on a ?QUEUE, this removes
+%% just one message, the one referenced by the MsgStatus provided.
+remove(false, MsgStatus = #msg_status {
+ seq_id = SeqId,
+ msg_id = MsgId,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_in_store = MsgInStore,
+ index_on_disk = IndexOnDisk },
+ State = #vqstate {out_counter = OutCount,
+ index_state = IndexState,
+ msg_store_clients = MSCState}) ->
+ %% Mark it delivered if necessary
+ IndexState1 = maybe_write_delivered(
+ IndexOnDisk andalso not IsDelivered,
+ SeqId, IndexState),
+
+ %% Remove from msg_store and queue index, if necessary
+ case MsgInStore of
+ true -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
+ false -> ok
+ end,
+
+ IndexState2 =
+ case IndexOnDisk of
+ true -> rabbit_queue_index:ack([SeqId], IndexState1);
+ false -> IndexState1
+ end,
+
+ State1 = stats({-1, 0}, {MsgStatus, none}, 0, State),
+
+ {undefined, maybe_update_rates(
+ State1 #vqstate {out_counter = OutCount + 1,
+ index_state = IndexState2})}.
+
+%% This function exists as a way to improve dropwhile/2
+%% performance. The idea of having this function is to optimise calls
+%% to rabbit_queue_index by batching delivers and acks, instead of
+%% sending them one by one.
+%%
+%% Instead of removing every message as they are popped from the
+%% queue, it first accumulates them and then removes them by calling
+%% remove_queue_entries/3, since the behaviour of
+%% remove_queue_entries/3 when used with
+%% process_delivers_and_acks_fun(deliver_and_ack) is the same as
+%% calling remove(false, MsgStatus, State).
+%%
+%% remove/3 also updates the out_counter in every call, but here we do
+%% it just once at the end.
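+%%
+%% An illustrative call (the predicate is supplied by the caller, e.g.
+%% dropwhile/2, and inspects the #message_properties{} of each message):
+%%
+%%   remove_by_predicate(
+%%     fun (#message_properties { expiry = Exp }) -> Exp =/= undefined end,
+%%     State)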
+remove_by_predicate(Pred, State = #vqstate {out_counter = OutCount}) ->
+ {MsgProps, QAcc, State1} =
+ collect_by_predicate(Pred, ?QUEUE:new(), State),
+ State2 =
+ remove_queue_entries(
+ QAcc, process_delivers_and_acks_fun(deliver_and_ack), State1),
+    %% maybe_update_rates/1 is called in remove/3 for every
+ %% message. Since we update out_counter only once, we call it just
+ %% there.
+ {MsgProps, maybe_update_rates(
+ State2 #vqstate {
+ out_counter = OutCount + ?QUEUE:len(QAcc)})}.
+
+%% This function exists as a way to improve fetchwhile/4
+%% performance. The idea of having this function is to optimise calls
+%% to rabbit_queue_index by batching delivers, instead of sending them
+%% one by one.
+%%
+%% Fun is the function passed to fetchwhile/4 that's
+%% applied to every fetched message and used to build the fetchwhile/4
+%% result accumulator FetchAcc.
+fetch_by_predicate(Pred, Fun, FetchAcc,
+ State = #vqstate {
+ index_state = IndexState,
+ out_counter = OutCount}) ->
+ {MsgProps, QAcc, State1} =
+ collect_by_predicate(Pred, ?QUEUE:new(), State),
+
+ {Delivers, FetchAcc1, State2} =
+ process_queue_entries(QAcc, Fun, FetchAcc, State1),
+
+ IndexState1 = rabbit_queue_index:deliver(Delivers, IndexState),
+
+ {MsgProps, FetchAcc1, maybe_update_rates(
+ State2 #vqstate {
+ index_state = IndexState1,
+ out_counter = OutCount + ?QUEUE:len(QAcc)})}.
+
+%% Here we try to do the same as remove(true, MsgStatus, State) does, but
+%% processing several messages at the same time. The idea is to
+%% optimize rabbit_queue_index:deliver/2 calls by sending a list of
+%% SeqIds instead of one by one, thus process_queue_entries1 will
+%% accumulate the required deliveries, will record_pending_ack for
+%% each message, and will update stats, like remove/2 does.
+%%
+%% For the meaning of Fun and FetchAcc arguments see
+%% fetch_by_predicate/4 above.
+process_queue_entries(Q, Fun, FetchAcc, State) ->
+ ?QUEUE:foldl(fun (MsgStatus, Acc) ->
+ process_queue_entries1(MsgStatus, Fun, Acc)
+ end,
+ {[], FetchAcc, State}, Q).
+
+process_queue_entries1(
+ #msg_status { seq_id = SeqId, is_delivered = IsDelivered,
+ index_on_disk = IndexOnDisk} = MsgStatus,
+ Fun,
+ {Delivers, FetchAcc, State}) ->
+ {Msg, State1} = read_msg(MsgStatus, State),
+ State2 = record_pending_ack(
+ MsgStatus #msg_status {
+ is_delivered = true }, State1),
+ {cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
+ Fun(Msg, SeqId, FetchAcc),
+ stats({-1, 1}, {MsgStatus, MsgStatus}, 0, State2)}.
+
+collect_by_predicate(Pred, QAcc, State) ->
+ case queue_out(State) of
+ {empty, State1} ->
+ {undefined, QAcc, State1};
+ {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
+ case Pred(MsgProps) of
+ true -> collect_by_predicate(Pred, ?QUEUE:in(MsgStatus, QAcc),
+ State1);
+ false -> {MsgProps, QAcc, in_r(MsgStatus, State1)}
+ end
+ end.
+
+%%----------------------------------------------------------------------------
+%% Helpers for Public API purge/1 function
+%%----------------------------------------------------------------------------
+
+%% The difference between purge_when_pending_acks/1
+%% and purge_and_index_reset/1 is that the former issues a deliver
+%% and an ack to the queue index for every message that's being
+%% removed, while the latter just resets the queue index state.
+purge_when_pending_acks(State) ->
+ State1 = purge1(process_delivers_and_acks_fun(deliver_and_ack), State),
+ a(State1).
+
+purge_and_index_reset(State) ->
+ State1 = purge1(process_delivers_and_acks_fun(none), State),
+ a(reset_qi_state(State1)).
+
+%% This function removes messages from each of {q1, q2, q3, q4}.
+%%
+%% With remove_queue_entries/3 q1 and q4 are emptied, while q2 and q3
+%% are specially handled by purge_betas_and_deltas/2.
+%%
+%% purge_betas_and_deltas/2 loads messages from the queue index,
+%% filling up q3 and in some cases moving messages from q2 to q3 while
+%% resetting q2 to an empty queue (see maybe_deltas_to_betas/2). The
+%% messages loaded into q3 are removed by calling
+%% remove_queue_entries/3 until there are no more messages to be read
+%% from the queue index. Messages are read in batches from the queue
+%% index.
+purge1(AfterFun, State = #vqstate { q4 = Q4}) ->
+ State1 = remove_queue_entries(Q4, AfterFun, State),
+
+ State2 = #vqstate {q1 = Q1} =
+ purge_betas_and_deltas(AfterFun, State1#vqstate{q4 = ?QUEUE:new()}),
+
+ State3 = remove_queue_entries(Q1, AfterFun, State2),
+
+ a(State3#vqstate{q1 = ?QUEUE:new()}).
+
+reset_qi_state(State = #vqstate{index_state = IndexState}) ->
+ State#vqstate{index_state =
+ rabbit_queue_index:reset_state(IndexState)}.
+
+is_pending_ack_empty(State) ->
+ count_pending_acks(State) =:= 0.
+
+is_unconfirmed_empty(#vqstate { unconfirmed = UC }) ->
+ gb_sets:is_empty(UC).
+
+count_pending_acks(#vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA }) ->
+ gb_trees:size(RPA) + gb_trees:size(DPA) + gb_trees:size(QPA).
+
+purge_betas_and_deltas(DelsAndAcksFun, State = #vqstate { mode = Mode }) ->
+ State0 = #vqstate { q3 = Q3 } =
+ case Mode of
+ lazy -> maybe_deltas_to_betas(DelsAndAcksFun, State);
+ _ -> State
+ end,
+
+ case ?QUEUE:is_empty(Q3) of
+ true -> State0;
+ false -> State1 = remove_queue_entries(Q3, DelsAndAcksFun, State0),
+ purge_betas_and_deltas(DelsAndAcksFun,
+ maybe_deltas_to_betas(
+ DelsAndAcksFun,
+ State1#vqstate{q3 = ?QUEUE:new()}))
+ end.
+
+remove_queue_entries(Q, DelsAndAcksFun,
+ State = #vqstate{msg_store_clients = MSCState}) ->
+ {MsgIdsByStore, Delivers, Acks, State1} =
+ ?QUEUE:foldl(fun remove_queue_entries1/2,
+ {maps:new(), [], [], State}, Q),
+ remove_msgs_by_id(MsgIdsByStore, MSCState),
+ DelsAndAcksFun(Delivers, Acks, State1).
+
+remove_queue_entries1(
+ #msg_status { msg_id = MsgId, seq_id = SeqId, is_delivered = IsDelivered,
+ msg_in_store = MsgInStore, index_on_disk = IndexOnDisk,
+ is_persistent = IsPersistent} = MsgStatus,
+ {MsgIdsByStore, Delivers, Acks, State}) ->
+ {case MsgInStore of
+ true -> rabbit_misc:maps_cons(IsPersistent, MsgId, MsgIdsByStore);
+ false -> MsgIdsByStore
+ end,
+ cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
+ cons_if(IndexOnDisk, SeqId, Acks),
+ stats({-1, 0}, {MsgStatus, none}, 0, State)}.
+
+process_delivers_and_acks_fun(deliver_and_ack) ->
+ fun (Delivers, Acks, State = #vqstate { index_state = IndexState }) ->
+ IndexState1 =
+ rabbit_queue_index:ack(
+ Acks, rabbit_queue_index:deliver(Delivers, IndexState)),
+ State #vqstate { index_state = IndexState1 }
+ end;
+process_delivers_and_acks_fun(_) ->
+ fun (_, _, State) ->
+ State
+ end.
+
+%%----------------------------------------------------------------------------
+%% Internal gubbins for publishing
+%%----------------------------------------------------------------------------
+
+publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+ MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+ IsDelivered, _ChPid, _Flow, PersistFun,
+ State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
+ mode = default,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
+ State2 = case ?QUEUE:is_empty(Q3) of
+ false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) };
+ true -> State1 #vqstate { q4 = ?QUEUE:in(m(MsgStatus1), Q4) }
+ end,
+ InCount1 = InCount + 1,
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ stats({1, 0}, {none, MsgStatus1}, 0,
+ State2#vqstate{ next_seq_id = SeqId + 1,
+ in_counter = InCount1,
+ unconfirmed = UC1 });
+publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+ MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+ IsDelivered, _ChPid, _Flow, PersistFun,
+ State = #vqstate { mode = lazy,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC,
+ delta = Delta}) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
+ Delta1 = expand_delta(SeqId, Delta, IsPersistent),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ stats(lazy_pub, {lazy, m(MsgStatus1)}, 1,
+ State1#vqstate{ delta = Delta1,
+ next_seq_id = SeqId + 1,
+ in_counter = InCount + 1,
+ unconfirmed = UC1}).
+
+batch_publish1({Msg, MsgProps, IsDelivered}, {ChPid, Flow, State}) ->
+ {ChPid, Flow, publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
+ fun maybe_prepare_write_to_disk/4, State)}.
+
+publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent,
+ id = MsgId },
+ MsgProps = #message_properties {
+ needs_confirming = NeedsConfirming },
+ _ChPid, _Flow, PersistFun,
+ State = #vqstate { mode = default,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ out_counter = OutCount,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
+ State2 = record_pending_ack(m(MsgStatus1), State1),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ State3 = stats({0, 1}, {none, MsgStatus1}, 0,
+ State2 #vqstate { next_seq_id = SeqId + 1,
+ out_counter = OutCount + 1,
+ in_counter = InCount + 1,
+ unconfirmed = UC1 }),
+ {SeqId, State3};
+publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent,
+ id = MsgId },
+ MsgProps = #message_properties {
+ needs_confirming = NeedsConfirming },
+ _ChPid, _Flow, PersistFun,
+ State = #vqstate { mode = lazy,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ out_counter = OutCount,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
+ State2 = record_pending_ack(m(MsgStatus1), State1),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ State3 = stats({0, 1}, {none, MsgStatus1}, 0,
+ State2 #vqstate { next_seq_id = SeqId + 1,
+ out_counter = OutCount + 1,
+ in_counter = InCount + 1,
+ unconfirmed = UC1 }),
+ {SeqId, State3}.
+
+batch_publish_delivered1({Msg, MsgProps}, {ChPid, Flow, SeqIds, State}) ->
+ {SeqId, State1} =
+ publish_delivered1(Msg, MsgProps, ChPid, Flow,
+ fun maybe_prepare_write_to_disk/4,
+ State),
+ {ChPid, Flow, [SeqId | SeqIds], State1}.
+
+maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status {
+ msg_in_store = true }, State) ->
+ {MsgStatus, State};
+maybe_write_msg_to_disk(Force, MsgStatus = #msg_status {
+ msg = Msg, msg_id = MsgId,
+ is_persistent = IsPersistent },
+ State = #vqstate{ msg_store_clients = MSCState,
+ disk_write_count = Count})
+ when Force orelse IsPersistent ->
+ case persist_to(MsgStatus) of
+ msg_store -> ok = msg_store_write(MSCState, IsPersistent, MsgId,
+ prepare_to_store(Msg)),
+ {MsgStatus#msg_status{msg_in_store = true},
+ State#vqstate{disk_write_count = Count + 1}};
+ queue_index -> {MsgStatus, State}
+ end;
+maybe_write_msg_to_disk(_Force, MsgStatus, State) ->
+ {MsgStatus, State}.
+
+%% Due to certain optimisations made inside
+%% rabbit_queue_index:pre_publish/7 we need to have two separate
+%% functions for index persistence. This one is only used when paging
+%% during memory pressure. We didn't want to modify
+%% maybe_write_index_to_disk/3 because that function is used in other
+%% places.
+maybe_batch_write_index_to_disk(_Force,
+ MsgStatus = #msg_status {
+ index_on_disk = true }, State) ->
+ {MsgStatus, State};
+maybe_batch_write_index_to_disk(Force,
+ MsgStatus = #msg_status {
+ msg = Msg,
+ msg_id = MsgId,
+ seq_id = SeqId,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_props = MsgProps},
+ State = #vqstate {
+ target_ram_count = TargetRamCount,
+ disk_write_count = DiskWriteCount,
+ index_state = IndexState})
+ when Force orelse IsPersistent ->
+ {MsgOrId, DiskWriteCount1} =
+ case persist_to(MsgStatus) of
+ msg_store -> {MsgId, DiskWriteCount};
+ queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
+ end,
+ IndexState1 = rabbit_queue_index:pre_publish(
+ MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered,
+ TargetRamCount, IndexState),
+ {MsgStatus#msg_status{index_on_disk = true},
+ State#vqstate{index_state = IndexState1,
+ disk_write_count = DiskWriteCount1}};
+maybe_batch_write_index_to_disk(_Force, MsgStatus, State) ->
+ {MsgStatus, State}.
+
+maybe_write_index_to_disk(_Force, MsgStatus = #msg_status {
+ index_on_disk = true }, State) ->
+ {MsgStatus, State};
+maybe_write_index_to_disk(Force, MsgStatus = #msg_status {
+ msg = Msg,
+ msg_id = MsgId,
+ seq_id = SeqId,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_props = MsgProps},
+ State = #vqstate{target_ram_count = TargetRamCount,
+ disk_write_count = DiskWriteCount,
+ index_state = IndexState})
+ when Force orelse IsPersistent ->
+ {MsgOrId, DiskWriteCount1} =
+ case persist_to(MsgStatus) of
+ msg_store -> {MsgId, DiskWriteCount};
+ queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
+ end,
+ IndexState1 = rabbit_queue_index:publish(
+ MsgOrId, SeqId, MsgProps, IsPersistent, TargetRamCount,
+ IndexState),
+ IndexState2 = maybe_write_delivered(IsDelivered, SeqId, IndexState1),
+ {MsgStatus#msg_status{index_on_disk = true},
+ State#vqstate{index_state = IndexState2,
+ disk_write_count = DiskWriteCount1}};
+
+maybe_write_index_to_disk(_Force, MsgStatus, State) ->
+ {MsgStatus, State}.
+
+maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) ->
+ {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
+ maybe_write_index_to_disk(ForceIndex, MsgStatus1, State1).
+
+maybe_prepare_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) ->
+ {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
+ maybe_batch_write_index_to_disk(ForceIndex, MsgStatus1, State1).
+
+determine_persist_to(#basic_message{
+ content = #content{properties = Props,
+ properties_bin = PropsBin}},
+ #message_properties{size = BodySize},
+ IndexMaxSize) ->
+ %% The >= is so that you can set the env to 0 and never persist
+ %% to the index.
+ %%
+ %% We want this to be fast, so we avoid size(term_to_binary())
+ %% here, or using the term size estimation from truncate.erl, both
+ %% of which are too slow. So instead, if the message body size
+ %% goes over the limit then we avoid any other checks.
+ %%
+    %% If it doesn't, we need to decide whether the properties will push
+ %% it past the limit. If we have the encoded properties (usual
+ %% case) we can just check their size. If we don't (message came
+ %% via the direct client), we make a guess based on the number of
+ %% headers.
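+    %%
+    %% Illustrative example (assuming ?HEADER_GUESS_SIZE is small relative
+    %% to the limit): with queue_index_embed_msgs_below = 4096, a message
+    %% with a 1024-byte body and 3 headers is estimated at
+    %% 1024 + 3 * ?HEADER_GUESS_SIZE bytes, so it would be embedded in the
+    %% queue index rather than written to the message store.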
+ case BodySize >= IndexMaxSize of
+ true -> msg_store;
+ false -> Est = case is_binary(PropsBin) of
+ true -> BodySize + size(PropsBin);
+ false -> #'P_basic'{headers = Hs} = Props,
+ case Hs of
+ undefined -> 0;
+ _ -> length(Hs)
+ end * ?HEADER_GUESS_SIZE + BodySize
+ end,
+ case Est >= IndexMaxSize of
+ true -> msg_store;
+ false -> queue_index
+ end
+ end.
+
+persist_to(#msg_status{persist_to = To}) -> To.
+
+prepare_to_store(Msg) ->
+ Msg#basic_message{
+ %% don't persist any recoverable decoded properties
+ content = rabbit_binary_parser:clear_decoded_content(
+ Msg #basic_message.content)}.
+
+%%----------------------------------------------------------------------------
+%% Internal gubbins for acks
+%%----------------------------------------------------------------------------
+
+record_pending_ack(#msg_status { seq_id = SeqId } = MsgStatus,
+ State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA,
+ ack_in_counter = AckInCount}) ->
+ Insert = fun (Tree) -> gb_trees:insert(SeqId, MsgStatus, Tree) end,
+ {RPA1, DPA1, QPA1} =
+ case {msg_in_ram(MsgStatus), persist_to(MsgStatus)} of
+ {false, _} -> {RPA, Insert(DPA), QPA};
+ {_, queue_index} -> {RPA, DPA, Insert(QPA)};
+ {_, msg_store} -> {Insert(RPA), DPA, QPA}
+ end,
+ State #vqstate { ram_pending_ack = RPA1,
+ disk_pending_ack = DPA1,
+ qi_pending_ack = QPA1,
+ ack_in_counter = AckInCount + 1}.
+
+lookup_pending_ack(SeqId, #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA}) ->
+ case gb_trees:lookup(SeqId, RPA) of
+ {value, V} -> V;
+ none -> case gb_trees:lookup(SeqId, DPA) of
+ {value, V} -> V;
+ none -> gb_trees:get(SeqId, QPA)
+ end
+ end.
+
+%% First parameter = UpdateStats
+remove_pending_ack(true, SeqId, State) ->
+ case remove_pending_ack(false, SeqId, State) of
+ {none, _} ->
+ {none, State};
+ {MsgStatus, State1} ->
+ {MsgStatus, stats({0, -1}, {MsgStatus, none}, 0, State1)}
+ end;
+remove_pending_ack(false, SeqId, State = #vqstate{ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA}) ->
+ case gb_trees:lookup(SeqId, RPA) of
+ {value, V} -> RPA1 = gb_trees:delete(SeqId, RPA),
+ {V, State #vqstate { ram_pending_ack = RPA1 }};
+ none -> case gb_trees:lookup(SeqId, DPA) of
+ {value, V} ->
+ DPA1 = gb_trees:delete(SeqId, DPA),
+ {V, State#vqstate{disk_pending_ack = DPA1}};
+ none ->
+ case gb_trees:lookup(SeqId, QPA) of
+ {value, V} ->
+ QPA1 = gb_trees:delete(SeqId, QPA),
+ {V, State#vqstate{qi_pending_ack = QPA1}};
+ none ->
+ {none, State}
+ end
+ end
+ end.
+
+purge_pending_ack(KeepPersistent,
+ State = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState }) ->
+ {IndexOnDiskSeqIds, MsgIdsByStore, State1} = purge_pending_ack1(State),
+ case KeepPersistent of
+ true -> remove_transient_msgs_by_id(MsgIdsByStore, MSCState),
+ State1;
+ false -> IndexState1 =
+ rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
+ remove_msgs_by_id(MsgIdsByStore, MSCState),
+ State1 #vqstate { index_state = IndexState1 }
+ end.
+
+purge_pending_ack_delete_and_terminate(
+ State = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState }) ->
+ {_, MsgIdsByStore, State1} = purge_pending_ack1(State),
+ IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState),
+ remove_msgs_by_id(MsgIdsByStore, MSCState),
+ State1 #vqstate { index_state = IndexState1 }.
+
+purge_pending_ack1(State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA }) ->
+ F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end,
+ {IndexOnDiskSeqIds, MsgIdsByStore, _AllMsgIds} =
+ rabbit_misc:gb_trees_fold(
+ F, rabbit_misc:gb_trees_fold(
+ F, rabbit_misc:gb_trees_fold(
+ F, accumulate_ack_init(), RPA), DPA), QPA),
+ State1 = State #vqstate { ram_pending_ack = gb_trees:empty(),
+ disk_pending_ack = gb_trees:empty(),
+ qi_pending_ack = gb_trees:empty()},
+ {IndexOnDiskSeqIds, MsgIdsByStore, State1}.
+
+%% MsgIdsByStore is a map with two keys:
+%%
+%% true: holds a list of Persistent Message Ids.
+%% false: holds a list of Transient Message Ids.
+%%
+%% When we call maps:to_list/1 we get two sets of msg ids, where
+%% IsPersistent is either true for persistent messages or false for
+%% transient ones. The msg_store_remove/3 function takes this boolean
+%% flag to determine which store the messages should be removed
+%% from.
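+%%
+%% For example, a map such as #{true => [MsgIdP], false => [MsgIdT1, MsgIdT2]}
+%% results in one remove call against the persistent store and another
+%% against the transient store.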
+remove_msgs_by_id(MsgIdsByStore, MSCState) ->
+ [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
+ || {IsPersistent, MsgIds} <- maps:to_list(MsgIdsByStore)].
+
+remove_transient_msgs_by_id(MsgIdsByStore, MSCState) ->
+ case maps:find(false, MsgIdsByStore) of
+ error -> ok;
+ {ok, MsgIds} -> ok = msg_store_remove(MSCState, false, MsgIds)
+ end.
+
+accumulate_ack_init() -> {[], maps:new(), []}.
+
+accumulate_ack(#msg_status { seq_id = SeqId,
+ msg_id = MsgId,
+ is_persistent = IsPersistent,
+ msg_in_store = MsgInStore,
+ index_on_disk = IndexOnDisk },
+ {IndexOnDiskSeqIdsAcc, MsgIdsByStore, AllMsgIds}) ->
+ {cons_if(IndexOnDisk, SeqId, IndexOnDiskSeqIdsAcc),
+ case MsgInStore of
+ true -> rabbit_misc:maps_cons(IsPersistent, MsgId, MsgIdsByStore);
+ false -> MsgIdsByStore
+ end,
+ [MsgId | AllMsgIds]}.
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing for confirms (aka publisher acks)
+%%----------------------------------------------------------------------------
+
+record_confirms(MsgIdSet, State = #vqstate { msgs_on_disk = MOD,
+ msg_indices_on_disk = MIOD,
+ unconfirmed = UC,
+ confirmed = C }) ->
+ State #vqstate {
+ msgs_on_disk = rabbit_misc:gb_sets_difference(MOD, MsgIdSet),
+ msg_indices_on_disk = rabbit_misc:gb_sets_difference(MIOD, MsgIdSet),
+ unconfirmed = rabbit_misc:gb_sets_difference(UC, MsgIdSet),
+ confirmed = gb_sets:union(C, MsgIdSet) }.
+
+msgs_written_to_disk(Callback, MsgIdSet, ignored) ->
+ Callback(?MODULE,
+ fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end);
+msgs_written_to_disk(Callback, MsgIdSet, written) ->
+ Callback(?MODULE,
+ fun (?MODULE, State = #vqstate { msgs_on_disk = MOD,
+ msg_indices_on_disk = MIOD,
+ unconfirmed = UC }) ->
+ Confirmed = gb_sets:intersection(UC, MsgIdSet),
+ record_confirms(gb_sets:intersection(MsgIdSet, MIOD),
+ State #vqstate {
+ msgs_on_disk =
+ gb_sets:union(MOD, Confirmed) })
+ end).
+
+msg_indices_written_to_disk(Callback, MsgIdSet) ->
+ Callback(?MODULE,
+ fun (?MODULE, State = #vqstate { msgs_on_disk = MOD,
+ msg_indices_on_disk = MIOD,
+ unconfirmed = UC }) ->
+ Confirmed = gb_sets:intersection(UC, MsgIdSet),
+ record_confirms(gb_sets:intersection(MsgIdSet, MOD),
+ State #vqstate {
+ msg_indices_on_disk =
+ gb_sets:union(MIOD, Confirmed) })
+ end).
+
+msgs_and_indices_written_to_disk(Callback, MsgIdSet) ->
+ Callback(?MODULE,
+ fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end).
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing for requeue
+%%----------------------------------------------------------------------------
+
+publish_alpha(#msg_status { msg = undefined } = MsgStatus, State) ->
+ {Msg, State1} = read_msg(MsgStatus, State),
+ MsgStatus1 = MsgStatus#msg_status { msg = Msg },
+ {MsgStatus1, stats({1, -1}, {MsgStatus, MsgStatus1}, 0, State1)};
+publish_alpha(MsgStatus, State) ->
+ {MsgStatus, stats({1, -1}, {MsgStatus, MsgStatus}, 0, State)}.
+
+publish_beta(MsgStatus, State) ->
+ {MsgStatus1, State1} = maybe_prepare_write_to_disk(true, false, MsgStatus, State),
+ MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+ {MsgStatus2, stats({1, -1}, {MsgStatus, MsgStatus2}, 0, State1)}.
+
+%% Rebuild queue, inserting sequence ids to maintain ordering
+queue_merge(SeqIds, Q, MsgIds, Limit, PubFun, State) ->
+ queue_merge(SeqIds, Q, ?QUEUE:new(), MsgIds,
+ Limit, PubFun, State).
+
+queue_merge([SeqId | Rest] = SeqIds, Q, Front, MsgIds,
+ Limit, PubFun, State)
+ when Limit == undefined orelse SeqId < Limit ->
+ case ?QUEUE:out(Q) of
+ {{value, #msg_status { seq_id = SeqIdQ } = MsgStatus}, Q1}
+ when SeqIdQ < SeqId ->
+ %% enqueue from the remaining queue
+ queue_merge(SeqIds, Q1, ?QUEUE:in(MsgStatus, Front), MsgIds,
+ Limit, PubFun, State);
+ {_, _Q1} ->
+ %% enqueue from the remaining list of sequence ids
+ case msg_from_pending_ack(SeqId, State) of
+ {none, _} ->
+ queue_merge(Rest, Q, Front, MsgIds, Limit, PubFun, State);
+ {MsgStatus, State1} ->
+ {#msg_status { msg_id = MsgId } = MsgStatus1, State2} =
+ PubFun(MsgStatus, State1),
+ queue_merge(Rest, Q, ?QUEUE:in(MsgStatus1, Front), [MsgId | MsgIds],
+ Limit, PubFun, State2)
+ end
+ end;
+queue_merge(SeqIds, Q, Front, MsgIds,
+ _Limit, _PubFun, State) ->
+ {SeqIds, ?QUEUE:join(Front, Q), MsgIds, State}.
+
+delta_merge([], Delta, MsgIds, State) ->
+ {Delta, MsgIds, State};
+delta_merge(SeqIds, Delta, MsgIds, State) ->
+ lists:foldl(fun (SeqId, {Delta0, MsgIds0, State0} = Acc) ->
+ case msg_from_pending_ack(SeqId, State0) of
+ {none, _} ->
+ Acc;
+ {#msg_status { msg_id = MsgId,
+ is_persistent = IsPersistent } = MsgStatus, State1} ->
+ {_MsgStatus, State2} =
+ maybe_prepare_write_to_disk(true, true, MsgStatus, State1),
+ {expand_delta(SeqId, Delta0, IsPersistent), [MsgId | MsgIds0],
+ stats({1, -1}, {MsgStatus, none}, 1, State2)}
+ end
+ end, {Delta, MsgIds, State}, SeqIds).
+
+%% Mostly opposite of record_pending_ack/2
+msg_from_pending_ack(SeqId, State) ->
+ case remove_pending_ack(false, SeqId, State) of
+ {none, _} ->
+ {none, State};
+ {#msg_status { msg_props = MsgProps } = MsgStatus, State1} ->
+ {MsgStatus #msg_status {
+ msg_props = MsgProps #message_properties { needs_confirming = false } },
+ State1}
+ end.
+
+beta_limit(Q) ->
+ case ?QUEUE:peek(Q) of
+ {value, #msg_status { seq_id = SeqId }} -> SeqId;
+ empty -> undefined
+ end.
+
+delta_limit(?BLANK_DELTA_PATTERN(_X)) -> undefined;
+delta_limit(#delta { start_seq_id = StartSeqId }) -> StartSeqId.
+
+%%----------------------------------------------------------------------------
+%% Iterator
+%%----------------------------------------------------------------------------
+
+ram_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.ram_pending_ack)}.
+
+disk_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.disk_pending_ack)}.
+
+qi_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.qi_pending_ack)}.
+
+msg_iterator(State) -> istate(start, State).
+
+istate(start, State) -> {q4, State#vqstate.q4, State};
+istate(q4, State) -> {q3, State#vqstate.q3, State};
+istate(q3, State) -> {delta, State#vqstate.delta, State};
+istate(delta, State) -> {q2, State#vqstate.q2, State};
+istate(q2, State) -> {q1, State#vqstate.q1, State};
+istate(q1, _State) -> done.
+
+next({ack, It}, IndexState) ->
+ case gb_trees:next(It) of
+ none -> {empty, IndexState};
+ {_SeqId, MsgStatus, It1} -> Next = {ack, It1},
+ {value, MsgStatus, true, Next, IndexState}
+ end;
+next(done, IndexState) -> {empty, IndexState};
+next({delta, #delta{start_seq_id = SeqId,
+ end_seq_id = SeqId}, State}, IndexState) ->
+ next(istate(delta, State), IndexState);
+next({delta, #delta{start_seq_id = SeqId,
+ end_seq_id = SeqIdEnd} = Delta, State}, IndexState) ->
+ SeqIdB = rabbit_queue_index:next_segment_boundary(SeqId),
+ SeqId1 = lists:min([SeqIdB, SeqIdEnd]),
+ {List, IndexState1} = rabbit_queue_index:read(SeqId, SeqId1, IndexState),
+ next({delta, Delta#delta{start_seq_id = SeqId1}, List, State}, IndexState1);
+next({delta, Delta, [], State}, IndexState) ->
+ next({delta, Delta, State}, IndexState);
+next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) ->
+ case is_msg_in_pending_acks(SeqId, State) of
+ false -> Next = {delta, Delta, Rest, State},
+ {value, beta_msg_status(M), false, Next, IndexState};
+ true -> next({delta, Delta, Rest, State}, IndexState)
+ end;
+next({Key, Q, State}, IndexState) ->
+ case ?QUEUE:out(Q) of
+ {empty, _Q} -> next(istate(Key, State), IndexState);
+ {{value, MsgStatus}, QN} -> Next = {Key, QN, State},
+ {value, MsgStatus, false, Next, IndexState}
+ end.
+
+inext(It, {Its, IndexState}) ->
+ case next(It, IndexState) of
+ {empty, IndexState1} ->
+ {Its, IndexState1};
+ {value, MsgStatus1, Unacked, It1, IndexState1} ->
+ {[{MsgStatus1, Unacked, It1} | Its], IndexState1}
+ end.
+
+ifold(_Fun, Acc, [], State0) ->
+ {Acc, State0};
+ifold(Fun, Acc, Its0, State0) ->
+ [{MsgStatus, Unacked, It} | Rest] =
+ lists:sort(fun ({#msg_status{seq_id = SeqId1}, _, _},
+ {#msg_status{seq_id = SeqId2}, _, _}) ->
+ SeqId1 =< SeqId2
+ end, Its0),
+ {Msg, State1} = read_msg(MsgStatus, State0),
+ case Fun(Msg, MsgStatus#msg_status.msg_props, Unacked, Acc) of
+ {stop, Acc1} ->
+ {Acc1, State1};
+ {cont, Acc1} ->
+ IndexState0 = State1#vqstate.index_state,
+ {Its1, IndexState1} = inext(It, {Rest, IndexState0}),
+ State2 = State1#vqstate{index_state = IndexState1},
+ ifold(Fun, Acc1, Its1, State2)
+ end.
+
+%%----------------------------------------------------------------------------
+%% Phase changes
+%%----------------------------------------------------------------------------
+
+maybe_reduce_memory_use(State = #vqstate {memory_reduction_run_count = MRedRunCount,
+ mode = Mode}) ->
+ case MRedRunCount >= ?EXPLICIT_GC_RUN_OP_THRESHOLD(Mode) of
+ true -> State1 = reduce_memory_use(State),
+ State1#vqstate{memory_reduction_run_count = 0};
+ false -> State#vqstate{memory_reduction_run_count = MRedRunCount + 1}
+ end.
+
+reduce_memory_use(State = #vqstate { target_ram_count = infinity }) ->
+ State;
+reduce_memory_use(State = #vqstate {
+ mode = default,
+ ram_pending_ack = RPA,
+ ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount,
+ io_batch_size = IoBatchSize,
+ rates = #rates { in = AvgIngress,
+ out = AvgEgress,
+ ack_in = AvgAckIngress,
+ ack_out = AvgAckEgress } }) ->
+    {CreditDiscBound, _} = rabbit_misc:get_env(rabbit,
+ msg_store_credit_disc_bound,
+ ?CREDIT_DISC_BOUND),
+ {NeedResumeA2B, State1} = {_, #vqstate { q2 = Q2, q3 = Q3 }} =
+ case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
+ 0 -> {false, State};
+ %% Reduce memory of pending acks and alphas. The order is
+ %% determined based on which is growing faster. Whichever
+ %% comes second may very well get a quota of 0 if the
+ %% first manages to push out the max number of messages.
+ A2BChunk ->
+ %% In case there are few messages to be sent to a message store
+                %% and many messages to be embedded in the queue index,
+ %% we should limit the number of messages to be flushed
+ %% to avoid blocking the process.
+ A2BChunkActual = case A2BChunk > CreditDiscBound * 2 of
+ true -> CreditDiscBound * 2;
+ false -> A2BChunk
+ end,
+ Funs = case ((AvgAckIngress - AvgAckEgress) >
+ (AvgIngress - AvgEgress)) of
+ true -> [fun limit_ram_acks/2,
+ fun push_alphas_to_betas/2];
+ false -> [fun push_alphas_to_betas/2,
+ fun limit_ram_acks/2]
+ end,
+ {Quota, State2} = lists:foldl(fun (ReduceFun, {QuotaN, StateN}) ->
+ ReduceFun(QuotaN, StateN)
+ end, {A2BChunkActual, State}, Funs),
+ {(Quota == 0) andalso (A2BChunk > A2BChunkActual), State2}
+ end,
+ Permitted = permitted_beta_count(State1),
+ {NeedResumeB2D, State3} =
+ %% If there are more messages with their queue position held in RAM,
+ %% a.k.a. betas, in Q2 & Q3 than IoBatchSize,
+ %% write their queue position to disk, a.k.a. push_betas_to_deltas
+ case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3),
+ Permitted) of
+ B2DChunk when B2DChunk >= IoBatchSize ->
+                %% Same as for alphas to betas. Limit the number of messages
+                %% to be flushed to disk at once to avoid blocking the process.
+ B2DChunkActual = case B2DChunk > CreditDiscBound * 2 of
+ true -> CreditDiscBound * 2;
+ false -> B2DChunk
+ end,
+ StateBD = push_betas_to_deltas(B2DChunkActual, State1),
+ {B2DChunk > B2DChunkActual, StateBD};
+ _ ->
+ {false, State1}
+ end,
+    %% We can be blocked by the credit flow, or limited by a batch size,
+    %% or finished with flushing.
+    %% If blocked by the credit flow, the credit grant will resume processing;
+    %% if limited by a batch, a batch continuation message should be sent.
+    %% The continuation message will be prioritised over publishes,
+    %% but not consumptions, so the queue can make progress.
+ Blocked = credit_flow:blocked(),
+ case {Blocked, NeedResumeA2B orelse NeedResumeB2D} of
+ %% Credit bump will continue paging
+ {true, _} -> State3;
+ %% Finished with paging
+ {false, false} -> State3;
+ %% Planning next batch
+ {false, true} ->
+            %% We don't want to use self-credit-flow, because it's harder to
+            %% reason about. So the process sends a (prioritised) message to
+            %% itself and sets waiting_bump to keep the mailbox clean.
+ maybe_bump_reduce_memory_use(State3)
+ end;
+%% When using lazy queues, there are no alphas, so we don't need to
+%% call push_alphas_to_betas/2.
+reduce_memory_use(State = #vqstate {
+ mode = lazy,
+ ram_pending_ack = RPA,
+ ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount }) ->
+ State1 = #vqstate { q3 = Q3 } =
+ case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
+ 0 -> State;
+ S1 -> {_, State2} = limit_ram_acks(S1, State),
+ State2
+ end,
+
+ State3 =
+ case chunk_size(?QUEUE:len(Q3),
+ permitted_beta_count(State1)) of
+ 0 ->
+ State1;
+ S2 ->
+ push_betas_to_deltas(S2, State1)
+ end,
+ garbage_collect(),
+ State3.
+
+maybe_bump_reduce_memory_use(State = #vqstate{ waiting_bump = true }) ->
+ State;
+maybe_bump_reduce_memory_use(State) ->
+ self() ! bump_reduce_memory_use,
+ State#vqstate{ waiting_bump = true }.
+
+limit_ram_acks(0, State) ->
+ {0, ui(State)};
+limit_ram_acks(Quota, State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA }) ->
+ case gb_trees:is_empty(RPA) of
+ true ->
+ {Quota, ui(State)};
+ false ->
+ {SeqId, MsgStatus, RPA1} = gb_trees:take_largest(RPA),
+ {MsgStatus1, State1} =
+ maybe_prepare_write_to_disk(true, false, MsgStatus, State),
+ MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+ DPA1 = gb_trees:insert(SeqId, MsgStatus2, DPA),
+ limit_ram_acks(Quota - 1,
+ stats({0, 0}, {MsgStatus, MsgStatus2}, 0,
+ State1 #vqstate { ram_pending_ack = RPA1,
+ disk_pending_ack = DPA1 }))
+ end.
+
+permitted_beta_count(#vqstate { len = 0 }) ->
+ infinity;
+permitted_beta_count(#vqstate { mode = lazy,
+ target_ram_count = TargetRamCount}) ->
+ TargetRamCount;
+permitted_beta_count(#vqstate { target_ram_count = 0, q3 = Q3 }) ->
+ lists:min([?QUEUE:len(Q3), rabbit_queue_index:next_segment_boundary(0)]);
+permitted_beta_count(#vqstate { q1 = Q1,
+ q4 = Q4,
+ target_ram_count = TargetRamCount,
+ len = Len }) ->
+ BetaDelta = Len - ?QUEUE:len(Q1) - ?QUEUE:len(Q4),
+ lists:max([rabbit_queue_index:next_segment_boundary(0),
+ BetaDelta - ((BetaDelta * BetaDelta) div
+ (BetaDelta + TargetRamCount))]).
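+%% Illustrative arithmetic for the last clause above: with Len = 20000,
+%% ?QUEUE:len(Q1) + ?QUEUE:len(Q4) = 10000 and TargetRamCount = 10000,
+%% BetaDelta = 10000 and the permitted count is
+%% 10000 - (10000 * 10000) div (10000 + 10000) = 5000, i.e. roughly half of
+%% the betas may remain in RAM; lists:max/1 keeps the result at or above one
+%% queue index segment.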
+
+chunk_size(Current, Permitted)
+ when Permitted =:= infinity orelse Permitted >= Current ->
+ 0;
+chunk_size(Current, Permitted) ->
+ Current - Permitted.
+
+fetch_from_q3(State = #vqstate { mode = default,
+ q1 = Q1,
+ q2 = Q2,
+ delta = #delta { count = DeltaCount },
+ q3 = Q3,
+ q4 = Q4 }) ->
+ case ?QUEUE:out(Q3) of
+ {empty, _Q3} ->
+ {empty, State};
+ {{value, MsgStatus}, Q3a} ->
+ State1 = State #vqstate { q3 = Q3a },
+ State2 = case {?QUEUE:is_empty(Q3a), 0 == DeltaCount} of
+ {true, true} ->
+ %% q3 is now empty, it wasn't before;
+ %% delta is still empty. So q2 must be
+ %% empty, and we know q4 is empty
+ %% otherwise we wouldn't be loading from
+ %% q3. As such, we can just set q4 to Q1.
+ true = ?QUEUE:is_empty(Q2), %% ASSERTION
+ true = ?QUEUE:is_empty(Q4), %% ASSERTION
+ State1 #vqstate { q1 = ?QUEUE:new(), q4 = Q1 };
+ {true, false} ->
+ maybe_deltas_to_betas(State1);
+ {false, _} ->
+ %% q3 still isn't empty, we've not
+ %% touched delta, so the invariants
+ %% between q1, q2, delta and q3 are
+ %% maintained
+ State1
+ end,
+ {loaded, {MsgStatus, State2}}
+ end;
+%% lazy queues
+fetch_from_q3(State = #vqstate { mode = lazy,
+ delta = #delta { count = DeltaCount },
+ q3 = Q3 }) ->
+ case ?QUEUE:out(Q3) of
+ {empty, _Q3} when DeltaCount =:= 0 ->
+ {empty, State};
+ {empty, _Q3} ->
+ fetch_from_q3(maybe_deltas_to_betas(State));
+ {{value, MsgStatus}, Q3a} ->
+ State1 = State #vqstate { q3 = Q3a },
+ {loaded, {MsgStatus, State1}}
+ end.
+
+maybe_deltas_to_betas(State) ->
+ AfterFun = process_delivers_and_acks_fun(deliver_and_ack),
+ maybe_deltas_to_betas(AfterFun, State).
+
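+%% Converts at most one queue index segment worth of delta messages into
+%% betas: reads index entries from the delta's start_seq_id up to the next
+%% segment boundary (or the end of the delta), appends the resulting betas
+%% to q3 and shrinks the delta accordingly; once the delta is exhausted,
+%% q2 is joined onto q3 as well.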
+maybe_deltas_to_betas(_DelsAndAcksFun,
+ State = #vqstate {delta = ?BLANK_DELTA_PATTERN(X) }) ->
+ State;
+maybe_deltas_to_betas(DelsAndAcksFun,
+ State = #vqstate {
+ q2 = Q2,
+ delta = Delta,
+ q3 = Q3,
+ index_state = IndexState,
+ ram_msg_count = RamMsgCount,
+ ram_bytes = RamBytes,
+ disk_read_count = DiskReadCount,
+ delta_transient_bytes = DeltaTransientBytes,
+ transient_threshold = TransientThreshold }) ->
+ #delta { start_seq_id = DeltaSeqId,
+ count = DeltaCount,
+ transient = Transient,
+ end_seq_id = DeltaSeqIdEnd } = Delta,
+ DeltaSeqId1 =
+ lists:min([rabbit_queue_index:next_segment_boundary(DeltaSeqId),
+ DeltaSeqIdEnd]),
+ {List, IndexState1} = rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1,
+ IndexState),
+ {Q3a, RamCountsInc, RamBytesInc, State1, TransientCount, TransientBytes} =
+ betas_from_index_entries(List, TransientThreshold,
+ DelsAndAcksFun,
+ State #vqstate { index_state = IndexState1 }),
+ State2 = State1 #vqstate { ram_msg_count = RamMsgCount + RamCountsInc,
+ ram_bytes = RamBytes + RamBytesInc,
+ disk_read_count = DiskReadCount + RamCountsInc },
+ case ?QUEUE:len(Q3a) of
+ 0 ->
+ %% we ignored every message in the segment due to it being
+ %% transient and below the threshold
+ maybe_deltas_to_betas(
+ DelsAndAcksFun,
+ State2 #vqstate {
+ delta = d(Delta #delta { start_seq_id = DeltaSeqId1 })});
+ Q3aLen ->
+ Q3b = ?QUEUE:join(Q3, Q3a),
+ case DeltaCount - Q3aLen of
+ 0 ->
+                    %% delta is now empty, but it wasn't before, so
+                    %% we can now join q2 onto q3
+ State2 #vqstate { q2 = ?QUEUE:new(),
+ delta = ?BLANK_DELTA,
+ q3 = ?QUEUE:join(Q3b, Q2),
+ delta_transient_bytes = 0};
+ N when N > 0 ->
+ Delta1 = d(#delta { start_seq_id = DeltaSeqId1,
+ count = N,
+ transient = Transient - TransientCount,
+ end_seq_id = DeltaSeqIdEnd }),
+ State2 #vqstate { delta = Delta1,
+ q3 = Q3b,
+ delta_transient_bytes = DeltaTransientBytes - TransientBytes }
+ end
+ end.
+
+push_alphas_to_betas(Quota, State) ->
+ {Quota1, State1} =
+ push_alphas_to_betas(
+ fun ?QUEUE:out/1,
+ fun (MsgStatus, Q1a,
+ State0 = #vqstate { q3 = Q3, delta = #delta { count = 0,
+ transient = 0 } }) ->
+ State0 #vqstate { q1 = Q1a, q3 = ?QUEUE:in(MsgStatus, Q3) };
+ (MsgStatus, Q1a, State0 = #vqstate { q2 = Q2 }) ->
+ State0 #vqstate { q1 = Q1a, q2 = ?QUEUE:in(MsgStatus, Q2) }
+ end, Quota, State #vqstate.q1, State),
+ {Quota2, State2} =
+ push_alphas_to_betas(
+ fun ?QUEUE:out_r/1,
+ fun (MsgStatus, Q4a, State0 = #vqstate { q3 = Q3 }) ->
+ State0 #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3), q4 = Q4a }
+ end, Quota1, State1 #vqstate.q4, State1),
+ {Quota2, State2}.
+
+push_alphas_to_betas(_Generator, _Consumer, Quota, _Q,
+ State = #vqstate { ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount })
+ when Quota =:= 0 orelse
+ TargetRamCount =:= infinity orelse
+ TargetRamCount >= RamMsgCount ->
+ {Quota, ui(State)};
+push_alphas_to_betas(Generator, Consumer, Quota, Q, State) ->
+ %% We consume credits from the message_store whenever we need to
+ %% persist a message to disk. See:
+ %% rabbit_variable_queue:msg_store_write/4. So perhaps the
+ %% msg_store is trying to throttle down our queue.
+ case credit_flow:blocked() of
+ true -> {Quota, ui(State)};
+ false -> case Generator(Q) of
+ {empty, _Q} ->
+ {Quota, ui(State)};
+ {{value, MsgStatus}, Qa} ->
+ {MsgStatus1, State1} =
+ maybe_prepare_write_to_disk(true, false, MsgStatus,
+ State),
+ MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+ State2 = stats(
+ ready0, {MsgStatus, MsgStatus2}, 0, State1),
+ State3 = Consumer(MsgStatus2, Qa, State2),
+ push_alphas_to_betas(Generator, Consumer, Quota - 1,
+ Qa, State3)
+ end
+ end.
+
+push_betas_to_deltas(Quota, State = #vqstate { mode = default,
+ q2 = Q2,
+ delta = Delta,
+ q3 = Q3}) ->
+ PushState = {Quota, Delta, State},
+ {Q3a, PushState1} = push_betas_to_deltas(
+ fun ?QUEUE:out_r/1,
+ fun rabbit_queue_index:next_segment_boundary/1,
+ Q3, PushState),
+ {Q2a, PushState2} = push_betas_to_deltas(
+ fun ?QUEUE:out/1,
+ fun (Q2MinSeqId) -> Q2MinSeqId end,
+ Q2, PushState1),
+ {_, Delta1, State1} = PushState2,
+ State1 #vqstate { q2 = Q2a,
+ delta = Delta1,
+ q3 = Q3a };
+%% In the case of lazy queues we want to page as many messages as
+%% possible from q3.
+push_betas_to_deltas(Quota, State = #vqstate { mode = lazy,
+ delta = Delta,
+ q3 = Q3}) ->
+ PushState = {Quota, Delta, State},
+ {Q3a, PushState1} = push_betas_to_deltas(
+ fun ?QUEUE:out_r/1,
+ fun (Q2MinSeqId) -> Q2MinSeqId end,
+ Q3, PushState),
+ {_, Delta1, State1} = PushState1,
+ State1 #vqstate { delta = Delta1,
+ q3 = Q3a }.
+
+
+push_betas_to_deltas(Generator, LimitFun, Q, PushState) ->
+ case ?QUEUE:is_empty(Q) of
+ true ->
+ {Q, PushState};
+ false ->
+ {value, #msg_status { seq_id = MinSeqId }} = ?QUEUE:peek(Q),
+ {value, #msg_status { seq_id = MaxSeqId }} = ?QUEUE:peek_r(Q),
+ Limit = LimitFun(MinSeqId),
+ case MaxSeqId < Limit of
+ true -> {Q, PushState};
+ false -> push_betas_to_deltas1(Generator, Limit, Q, PushState)
+ end
+ end.
+
+push_betas_to_deltas1(_Generator, _Limit, Q, {0, Delta, State}) ->
+ {Q, {0, Delta, ui(State)}};
+push_betas_to_deltas1(Generator, Limit, Q, {Quota, Delta, State}) ->
+ case Generator(Q) of
+ {empty, _Q} ->
+ {Q, {Quota, Delta, ui(State)}};
+ {{value, #msg_status { seq_id = SeqId }}, _Qa}
+ when SeqId < Limit ->
+ {Q, {Quota, Delta, ui(State)}};
+ {{value, MsgStatus = #msg_status { seq_id = SeqId }}, Qa} ->
+ {#msg_status { index_on_disk = true,
+ is_persistent = IsPersistent }, State1} =
+ maybe_batch_write_index_to_disk(true, MsgStatus, State),
+ State2 = stats(ready0, {MsgStatus, none}, 1, State1),
+ Delta1 = expand_delta(SeqId, Delta, IsPersistent),
+ push_betas_to_deltas1(Generator, Limit, Qa,
+ {Quota - 1, Delta1, State2})
+ end.
+
+%% Flushes queue index batch caches and updates queue index state.
+ui(#vqstate{index_state = IndexState,
+ target_ram_count = TargetRamCount} = State) ->
+ IndexState1 = rabbit_queue_index:flush_pre_publish_cache(
+ TargetRamCount, IndexState),
+ State#vqstate{index_state = IndexState1}.
+
+%%----------------------------------------------------------------------------
+%% Upgrading
+%%----------------------------------------------------------------------------
+
+-spec multiple_routing_keys() -> 'ok'.
+
+multiple_routing_keys() ->
+ transform_storage(
+ fun ({basic_message, ExchangeName, Routing_Key, Content,
+ MsgId, Persistent}) ->
+ {ok, {basic_message, ExchangeName, [Routing_Key], Content,
+ MsgId, Persistent}};
+ (_) -> {error, corrupt_message}
+ end),
+ ok.
+
+
+%% Assumes message store is not running
+transform_storage(TransformFun) ->
+ transform_store(?PERSISTENT_MSG_STORE, TransformFun),
+ transform_store(?TRANSIENT_MSG_STORE, TransformFun).
+
+transform_store(Store, TransformFun) ->
+ rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store),
+ rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun).
+
+move_messages_to_vhost_store() ->
+ case list_persistent_queues() of
+ [] ->
+ log_upgrade("No durable queues found."
+ " Skipping message store migration"),
+ ok;
+ Queues ->
+ move_messages_to_vhost_store(Queues)
+ end,
+ ok = delete_old_store(),
+ ok = rabbit_queue_index:cleanup_global_recovery_terms().
+
+move_messages_to_vhost_store(Queues) ->
+ log_upgrade("Moving messages to per-vhost message store"),
+ %% Move the queue index for each persistent queue to the new store
+ lists:foreach(
+ fun(Queue) ->
+ QueueName = amqqueue:get_name(Queue),
+ rabbit_queue_index:move_to_per_vhost_stores(QueueName)
+ end,
+ Queues),
+ %% Legacy (global) msg_store may require recovery.
+ %% This upgrade step should only be started
+ %% if we are upgrading from a pre-3.7.0 version.
+ {QueuesWithTerms, RecoveryRefs, StartFunState} = read_old_recovery_terms(Queues),
+
+ OldStore = run_old_persistent_store(RecoveryRefs, StartFunState),
+
+ VHosts = rabbit_vhost:list_names(),
+
+ %% New store should not be recovered.
+ NewMsgStore = start_new_store(VHosts),
+ %% Recovery terms should be started for all vhosts for new store.
+ [ok = rabbit_recovery_terms:open_table(VHost) || VHost <- VHosts],
+
+ MigrationBatchSize = application:get_env(rabbit, queue_migration_batch_size,
+ ?QUEUE_MIGRATION_BATCH_SIZE),
+ in_batches(MigrationBatchSize,
+ {rabbit_variable_queue, migrate_queue, [OldStore, NewMsgStore]},
+ QueuesWithTerms,
+               "message_store upgrades: Migrating batch ~p of ~p queues (~p queues in total)~n",
+               "message_store upgrades: Batch ~p of ~p queues migrated. ~p queues left~n"),
+
+ log_upgrade("Message store migration finished"),
+ ok = rabbit_sup:stop_child(OldStore),
+    [ok = rabbit_recovery_terms:close_table(VHost) || VHost <- VHosts],
+ ok = stop_new_store(NewMsgStore).
+
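+%% Each batch is dispatched as parallel rpc:async_call/4 jobs on the local
+%% node and awaited with rpc:yield/1; a {badrpc, _} result from any job
+%% aborts the whole migration via throw/1.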
+in_batches(Size, MFA, List, MessageStart, MessageEnd) ->
+ in_batches(Size, 1, MFA, List, MessageStart, MessageEnd).
+
+in_batches(_, _, _, [], _, _) -> ok;
+in_batches(Size, BatchNum, MFA, List, MessageStart, MessageEnd) ->
+ Length = length(List),
+ {Batch, Tail} = case Size > Length of
+ true -> {List, []};
+ false -> lists:split(Size, List)
+ end,
+ ProcessedLength = (BatchNum - 1) * Size,
+ rabbit_log:info(MessageStart, [BatchNum, Size, ProcessedLength + Length]),
+ {M, F, A} = MFA,
+ Keys = [ rpc:async_call(node(), M, F, [El | A]) || El <- Batch ],
+ lists:foreach(fun(Key) ->
+ case rpc:yield(Key) of
+ {badrpc, Err} -> throw(Err);
+ _ -> ok
+ end
+ end,
+ Keys),
+ rabbit_log:info(MessageEnd, [BatchNum, Size, length(Tail)]),
+ in_batches(Size, BatchNum + 1, MFA, Tail, MessageStart, MessageEnd).
+
+migrate_queue({QueueName = #resource{virtual_host = VHost, name = Name},
+ RecoveryTerm},
+ OldStore, NewStore) ->
+ log_upgrade_verbose(
+ "Migrating messages in queue ~s in vhost ~s to per-vhost message store~n",
+ [Name, VHost]),
+ OldStoreClient = get_global_store_client(OldStore),
+ NewStoreClient = get_per_vhost_store_client(QueueName, NewStore),
+ %% WARNING: During scan_queue_segments queue index state is being recovered
+ %% and terminated. This can cause side effects!
+ rabbit_queue_index:scan_queue_segments(
+      %% We migrate only persistent messages that are found in the message
+      %% store and are not yet acked
+ fun (_SeqId, MsgId, _MsgProps, true, _IsDelivered, no_ack, OldC)
+ when is_binary(MsgId) ->
+ migrate_message(MsgId, OldC, NewStoreClient);
+ (_SeqId, _MsgId, _MsgProps,
+ _IsPersistent, _IsDelivered, _IsAcked, OldC) ->
+ OldC
+ end,
+ OldStoreClient,
+ QueueName),
+ rabbit_msg_store:client_terminate(OldStoreClient),
+ rabbit_msg_store:client_terminate(NewStoreClient),
+ NewClientRef = rabbit_msg_store:client_ref(NewStoreClient),
+ case RecoveryTerm of
+ non_clean_shutdown -> ok;
+ Term when is_list(Term) ->
+ NewRecoveryTerm = lists:keyreplace(persistent_ref, 1, RecoveryTerm,
+ {persistent_ref, NewClientRef}),
+ rabbit_queue_index:update_recovery_term(QueueName, NewRecoveryTerm)
+ end,
+ log_upgrade_verbose("Finished migrating queue ~s in vhost ~s", [Name, VHost]),
+ {QueueName, NewClientRef}.
+
+migrate_message(MsgId, OldC, NewC) ->
+ case rabbit_msg_store:read(MsgId, OldC) of
+ {{ok, Msg}, OldC1} ->
+ ok = rabbit_msg_store:write(MsgId, Msg, NewC),
+ OldC1;
+ _ -> OldC
+ end.
+
+get_per_vhost_store_client(#resource{virtual_host = VHost}, NewStore) ->
+ {VHost, StorePid} = lists:keyfind(VHost, 1, NewStore),
+ rabbit_msg_store:client_init(StorePid, rabbit_guid:gen(),
+ fun(_,_) -> ok end, fun() -> ok end).
+
+get_global_store_client(OldStore) ->
+ rabbit_msg_store:client_init(OldStore,
+ rabbit_guid:gen(),
+ fun(_,_) -> ok end,
+ fun() -> ok end).
+
+list_persistent_queues() ->
+ Node = node(),
+ mnesia:async_dirty(
+ fun () ->
+ qlc:e(qlc:q([Q || Q <- mnesia:table(rabbit_durable_queue),
+ ?amqqueue_is_classic(Q),
+ amqqueue:qnode(Q) == Node,
+ mnesia:read(rabbit_queue, amqqueue:get_name(Q), read) =:= []]))
+ end).
+
+read_old_recovery_terms([]) ->
+ {[], [], ?EMPTY_START_FUN_STATE};
+read_old_recovery_terms(Queues) ->
+ QueueNames = [amqqueue:get_name(Q) || Q <- Queues],
+ {AllTerms, StartFunState} = rabbit_queue_index:read_global_recovery_terms(QueueNames),
+ Refs = [Ref || Terms <- AllTerms,
+ Terms /= non_clean_shutdown,
+ begin
+ Ref = proplists:get_value(persistent_ref, Terms),
+ Ref =/= undefined
+ end],
+ {lists:zip(QueueNames, AllTerms), Refs, StartFunState}.
+
+run_old_persistent_store(Refs, StartFunState) ->
+ OldStoreName = ?PERSISTENT_MSG_STORE,
+ ok = rabbit_sup:start_child(OldStoreName, rabbit_msg_store, start_global_store_link,
+ [OldStoreName, rabbit_mnesia:dir(),
+ Refs, StartFunState]),
+ OldStoreName.
+
+start_new_store(VHosts) ->
+ %% Ensure vhost supervisor is started, so we can add vhosts to it.
+ lists:map(fun(VHost) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ {ok, Pid} = rabbit_msg_store:start_link(?PERSISTENT_MSG_STORE,
+ VHostDir,
+ undefined,
+ ?EMPTY_START_FUN_STATE),
+ {VHost, Pid}
+ end,
+ VHosts).
+
+stop_new_store(NewStore) ->
+ lists:foreach(fun({_VHost, StorePid}) ->
+ unlink(StorePid),
+ exit(StorePid, shutdown)
+ end,
+ NewStore),
+ ok.
+
+delete_old_store() ->
+ log_upgrade("Removing the old message store data"),
+ rabbit_file:recursive_delete(
+ [filename:join([rabbit_mnesia:dir(), ?PERSISTENT_MSG_STORE])]),
+ %% Delete old transient store as well
+ rabbit_file:recursive_delete(
+ [filename:join([rabbit_mnesia:dir(), ?TRANSIENT_MSG_STORE])]),
+ ok.
+
+log_upgrade(Msg) ->
+ log_upgrade(Msg, []).
+
+log_upgrade(Msg, Args) ->
+ rabbit_log:info("message_store upgrades: " ++ Msg, Args).
+
+log_upgrade_verbose(Msg) ->
+ log_upgrade_verbose(Msg, []).
+
+log_upgrade_verbose(Msg, Args) ->
+ rabbit_log_upgrade:info(Msg, Args).
+
+maybe_client_terminate(MSCStateP) ->
+    %% The queue might have been asked to stop by the supervisor; it needs a
+    %% clean shutdown in order for the supervision strategy to work - if it
+    %% reaches the maximum restart intensity it might bring the vhost down.
+ try
+ rabbit_msg_store:client_terminate(MSCStateP)
+ catch
+ _:_ ->
+ ok
+ end.
diff --git a/deps/rabbit/src/rabbit_version.erl b/deps/rabbit/src/rabbit_version.erl
new file mode 100644
index 0000000000..3f5462c7b4
--- /dev/null
+++ b/deps/rabbit/src/rabbit_version.erl
@@ -0,0 +1,227 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_version).
+
+-export([recorded/0, matches/2, desired/0, desired_for_scope/1,
+ record_desired/0, record_desired_for_scope/1,
+ upgrades_required/1, all_upgrades_required/1,
+ check_version_consistency/3,
+ check_version_consistency/4, check_otp_consistency/1,
+ version_error/3]).
+
+%% -------------------------------------------------------------------
+
+-export_type([scope/0, step/0]).
+
+-type scope() :: atom().
+-type scope_version() :: [atom()].
+-type step() :: {atom(), atom()}.
+
+-type version() :: [atom()].
+
+%% -------------------------------------------------------------------
+
+-define(VERSION_FILENAME, "schema_version").
+-define(SCOPES, [mnesia, local]).
+
+%% -------------------------------------------------------------------
+
+-spec recorded() -> rabbit_types:ok_or_error2(version(), any()).
+
+recorded() -> case rabbit_file:read_term_file(schema_filename()) of
+ {ok, [V]} -> {ok, V};
+ {error, _} = Err -> Err
+ end.
+
+record(V) -> ok = rabbit_file:write_term_file(schema_filename(), [V]).
+
+recorded_for_scope(Scope) ->
+ case recorded() of
+ {error, _} = Err ->
+ Err;
+ {ok, Version} ->
+ {ok, case lists:keysearch(Scope, 1, categorise_by_scope(Version)) of
+ false -> [];
+ {value, {Scope, SV1}} -> SV1
+ end}
+ end.
+
+record_for_scope(Scope, ScopeVersion) ->
+ case recorded() of
+ {error, _} = Err ->
+ Err;
+ {ok, Version} ->
+ Version1 = lists:keystore(Scope, 1, categorise_by_scope(Version),
+ {Scope, ScopeVersion}),
+ ok = record([Name || {_Scope, Names} <- Version1, Name <- Names])
+ end.
+
+%% -------------------------------------------------------------------
+
+-spec matches([A], [A]) -> boolean().
+
+matches(VerA, VerB) ->
+ lists:usort(VerA) =:= lists:usort(VerB).
+
+%% -------------------------------------------------------------------
+
+-spec desired() -> version().
+
+desired() -> [Name || Scope <- ?SCOPES, Name <- desired_for_scope(Scope)].
+
+-spec desired_for_scope(scope()) -> scope_version().
+
+desired_for_scope(Scope) -> with_upgrade_graph(fun heads/1, Scope).
+
+-spec record_desired() -> 'ok'.
+
+record_desired() -> record(desired()).
+
+-spec record_desired_for_scope
+ (scope()) -> rabbit_types:ok_or_error(any()).
+
+record_desired_for_scope(Scope) ->
+ record_for_scope(Scope, desired_for_scope(Scope)).
+
+-spec upgrades_required
+ (scope()) -> rabbit_types:ok_or_error2([step()], any()).
+
+upgrades_required(Scope) ->
+ case recorded_for_scope(Scope) of
+ {error, enoent} ->
+ case filelib:is_file(rabbit_guid:filename()) of
+ false -> {error, starting_from_scratch};
+ true -> {error, version_not_available}
+ end;
+ {ok, CurrentHeads} ->
+ with_upgrade_graph(
+ fun (G) ->
+ case unknown_heads(CurrentHeads, G) of
+ [] -> {ok, upgrades_to_apply(CurrentHeads, G)};
+ Unknown -> {error, {future_upgrades_found, Unknown}}
+ end
+ end, Scope)
+ end.
+
+all_upgrades_required(Scopes) ->
+ case recorded() of
+ {error, enoent} ->
+ case filelib:is_file(rabbit_guid:filename()) of
+ false -> {error, starting_from_scratch};
+ true -> {error, version_not_available}
+ end;
+ {ok, _} ->
+ lists:foldl(
+ fun
+ (_, {error, Err}) -> {error, Err};
+ (Scope, {ok, Acc}) ->
+ case upgrades_required(Scope) of
+ %% Lift errors from any scope.
+ {error, Err} -> {error, Err};
+ %% Filter non-upgradable scopes
+ {ok, []} -> {ok, Acc};
+ {ok, Upgrades} -> {ok, [{Scope, Upgrades} | Acc]}
+ end
+ end,
+ {ok, []},
+ Scopes)
+ end.
+
+%% -------------------------------------------------------------------
+
+with_upgrade_graph(Fun, Scope) ->
+ case rabbit_misc:build_acyclic_graph(
+ fun ({_App, Module, Steps}) -> vertices(Module, Steps, Scope) end,
+ fun ({_App, Module, Steps}) -> edges(Module, Steps, Scope) end,
+ rabbit_misc:all_module_attributes(rabbit_upgrade)) of
+ {ok, G} -> try
+ Fun(G)
+ after
+ true = digraph:delete(G)
+ end;
+ {error, {vertex, duplicate, StepName}} ->
+ throw({error, {duplicate_upgrade_step, StepName}});
+ {error, {edge, {bad_vertex, StepName}, _From, _To}} ->
+ throw({error, {dependency_on_unknown_upgrade_step, StepName}});
+ {error, {edge, {bad_edge, StepNames}, _From, _To}} ->
+ throw({error, {cycle_in_upgrade_steps, StepNames}})
+ end.
+
+vertices(Module, Steps, Scope0) ->
+ [{StepName, {Module, StepName}} || {StepName, Scope1, _Reqs} <- Steps,
+ Scope0 == Scope1].
+
+edges(_Module, Steps, Scope0) ->
+ [{Require, StepName} || {StepName, Scope1, Requires} <- Steps,
+ Require <- Requires,
+                            Scope0 == Scope1].
+
+unknown_heads(Heads, G) ->
+ [H || H <- Heads, digraph:vertex(G, H) =:= false].
+
+upgrades_to_apply(Heads, G) ->
+ %% Take all the vertices which can reach the known heads. That's
+ %% everything we've already applied. Subtract that from all
+ %% vertices: that's what we have to apply.
+ Unsorted = sets:to_list(
+ sets:subtract(
+ sets:from_list(digraph:vertices(G)),
+ sets:from_list(digraph_utils:reaching(Heads, G)))),
+ %% Form a subgraph from that list and find a topological ordering
+ %% so we can invoke them in order.
+ [element(2, digraph:vertex(G, StepName)) ||
+ StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))].
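+%% Illustrative example for upgrades_to_apply/2 above: with upgrade steps
+%% a -> b -> c (b requires a, c requires b) and recorded heads [a], only a
+%% can reach a, so the subgraph contains b and c and the steps to apply are
+%% returned in topological order, i.e. b then c.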
+
+heads(G) ->
+ lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]).
+
+%% -------------------------------------------------------------------
+
+categorise_by_scope(Version) when is_list(Version) ->
+ Categorised =
+ [{Scope, Name} || {_App, _Module, Attributes} <-
+ rabbit_misc:all_module_attributes(rabbit_upgrade),
+ {Name, Scope, _Requires} <- Attributes,
+ lists:member(Name, Version)],
+ maps:to_list(
+ lists:foldl(fun ({Scope, Name}, CatVersion) ->
+ rabbit_misc:maps_cons(Scope, Name, CatVersion)
+ end, maps:new(), Categorised)).
+
+dir() -> rabbit_mnesia:dir().
+
+schema_filename() -> filename:join(dir(), ?VERSION_FILENAME).
+
+%% --------------------------------------------------------------------
+
+-spec check_version_consistency
+ (string(), string(), string()) -> rabbit_types:ok_or_error(any()).
+
+check_version_consistency(This, Remote, Name) ->
+ check_version_consistency(This, Remote, Name, fun (A, B) -> A =:= B end).
+
+-spec check_version_consistency
+ (string(), string(), string(),
+ fun((string(), string()) -> boolean())) ->
+ rabbit_types:ok_or_error(any()).
+
+check_version_consistency(This, Remote, Name, Comp) ->
+ case Comp(This, Remote) of
+ true -> ok;
+ false -> version_error(Name, This, Remote)
+ end.
+
+version_error(Name, This, Remote) ->
+ {error, {inconsistent_cluster,
+ rabbit_misc:format("~s version mismatch: local node is ~s, "
+ "remote node ~s", [Name, This, Remote])}}.
+
+-spec check_otp_consistency
+ (string()) -> rabbit_types:ok_or_error(any()).
+
+check_otp_consistency(Remote) ->
+ check_version_consistency(rabbit_misc:otp_release(), Remote, "OTP").
diff --git a/deps/rabbit/src/rabbit_vhost.erl b/deps/rabbit/src/rabbit_vhost.erl
new file mode 100644
index 0000000000..c8c5fc961a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost.erl
@@ -0,0 +1,422 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vhost).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("vhost.hrl").
+
+-export([recover/0, recover/1]).
+-export([add/2, add/4, delete/2, exists/1, with/2, with_user_and_vhost/3, assert/1, update/2,
+ set_limits/2, vhost_cluster_state/1, is_running_on_all_nodes/1, await_running_on_all_nodes/2,
+ list/0, count/0, list_names/0, all/0, parse_tags/1]).
+-export([info/1, info/2, info_all/0, info_all/1, info_all/2, info_all/3]).
+-export([dir/1, msg_store_dir_path/1, msg_store_dir_wildcard/0]).
+-export([delete_storage/1]).
+-export([vhost_down/1]).
+-export([put_vhost/5]).
+
+%%
+%% API
+%%
+
+recover() ->
+ %% Clear out remnants of old incarnation, in case we restarted
+ %% faster than other nodes handled DOWN messages from us.
+ rabbit_amqqueue:on_node_down(node()),
+
+ rabbit_amqqueue:warn_file_limit(),
+
+ %% Prepare rabbit_semi_durable_route table
+ rabbit_binding:recover(),
+
+ %% rabbit_vhost_sup_sup will start the actual recovery.
+ %% So recovery will be run every time a vhost supervisor is restarted.
+ ok = rabbit_vhost_sup_sup:start(),
+
+ [ok = rabbit_vhost_sup_sup:init_vhost(VHost) || VHost <- list_names()],
+ ok.
+
+recover(VHost) ->
+ VHostDir = msg_store_dir_path(VHost),
+ rabbit_log:info("Making sure data directory '~ts' for vhost '~s' exists~n",
+ [VHostDir, VHost]),
+ VHostStubFile = filename:join(VHostDir, ".vhost"),
+ ok = rabbit_file:ensure_dir(VHostStubFile),
+ ok = file:write_file(VHostStubFile, VHost),
+ {Recovered, Failed} = rabbit_amqqueue:recover(VHost),
+ AllQs = Recovered ++ Failed,
+ QNames = [amqqueue:get_name(Q) || Q <- AllQs],
+ ok = rabbit_binding:recover(rabbit_exchange:recover(VHost), QNames),
+ ok = rabbit_amqqueue:start(Recovered),
+ %% Start queue mirrors.
+ ok = rabbit_mirror_queue_misc:on_vhost_up(VHost),
+ ok.
+
+-define(INFO_KEYS, vhost:info_keys()).
+
+-spec parse_tags(binary() | string() | atom()) -> [atom()].
+parse_tags(undefined) ->
+ [];
+parse_tags("") ->
+ [];
+parse_tags(<<"">>) ->
+ [];
+parse_tags(Val) when is_binary(Val) ->
+ parse_tags(rabbit_data_coercion:to_list(Val));
+parse_tags(Val) when is_list(Val) ->
+ [trim_tag(Tag) || Tag <- re:split(Val, ",", [{return, list}])].
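+%% For illustration, parse_tags(<<"qa, production">>) and
+%% parse_tags("qa, production") both return [qa, production]: the value is
+%% split on commas and each tag is trimmed and converted to an atom.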
+
+-spec add(vhost:name(), rabbit_types:username()) -> rabbit_types:ok_or_error(any()).
+
+add(VHost, ActingUser) ->
+ case exists(VHost) of
+ true -> ok;
+ false -> do_add(VHost, <<"">>, [], ActingUser)
+ end.
+
+-spec add(vhost:name(), binary(), [atom()], rabbit_types:username()) -> rabbit_types:ok_or_error(any()).
+
+add(Name, Description, Tags, ActingUser) ->
+ case exists(Name) of
+ true -> ok;
+ false -> do_add(Name, Description, Tags, ActingUser)
+ end.
+
+do_add(Name, Description, Tags, ActingUser) ->
+ case Description of
+ undefined ->
+ rabbit_log:info("Adding vhost '~s' without a description", [Name]);
+ Value ->
+ rabbit_log:info("Adding vhost '~s' (description: '~s')", [Name, Value])
+ end,
+ VHost = rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:wread({rabbit_vhost, Name}) of
+ [] ->
+ Row = vhost:new(Name, [], #{description => Description, tags => Tags}),
+ rabbit_log:debug("Inserting a virtual host record ~p", [Row]),
+ ok = mnesia:write(rabbit_vhost, Row, write),
+ Row;
+ %% the vhost already exists
+ [Row] ->
+ Row
+ end
+ end,
+ fun (VHost1, true) ->
+ VHost1;
+ (VHost1, false) ->
+ [begin
+ Resource = rabbit_misc:r(Name, exchange, ExchangeName),
+ rabbit_log:debug("Will declare an exchange ~p", [Resource]),
+ _ = rabbit_exchange:declare(Resource, Type, true, false, Internal, [], ActingUser)
+ end || {ExchangeName, Type, Internal} <-
+ [{<<"">>, direct, false},
+ {<<"amq.direct">>, direct, false},
+ {<<"amq.topic">>, topic, false},
+ %% per 0-9-1 pdf
+ {<<"amq.match">>, headers, false},
+ %% per 0-9-1 xml
+ {<<"amq.headers">>, headers, false},
+ {<<"amq.fanout">>, fanout, false},
+ {<<"amq.rabbitmq.trace">>, topic, true}]],
+ VHost1
+ end),
+ case rabbit_vhost_sup_sup:start_on_all_nodes(Name) of
+ ok ->
+ rabbit_event:notify(vhost_created, info(VHost)
+ ++ [{user_who_performed_action, ActingUser},
+ {description, Description},
+ {tags, Tags}]),
+ ok;
+ {error, Reason} ->
+ Msg = rabbit_misc:format("failed to set up vhost '~s': ~p",
+ [Name, Reason]),
+ {error, Msg}
+ end.
+
+-spec delete(vhost:name(), rabbit_types:username()) -> rabbit_types:ok_or_error(any()).
+
+delete(VHost, ActingUser) ->
+ %% FIXME: We are forced to delete the queues and exchanges outside
+ %% the TX below. Queue deletion involves sending messages to the queue
+ %% process, which in turn results in further mnesia actions and
+ %% eventually the termination of that process. Exchange deletion causes
+ %% notifications which must be sent outside the TX
+ rabbit_log:info("Deleting vhost '~s'~n", [VHost]),
+ QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false, ActingUser) end,
+ [begin
+ Name = amqqueue:get_name(Q),
+ assert_benign(rabbit_amqqueue:with(Name, QDelFun), ActingUser)
+ end || Q <- rabbit_amqqueue:list(VHost)],
+ [assert_benign(rabbit_exchange:delete(Name, false, ActingUser), ActingUser) ||
+ #exchange{name = Name} <- rabbit_exchange:list(VHost)],
+ Funs = rabbit_misc:execute_mnesia_transaction(
+ with(VHost, fun () -> internal_delete(VHost, ActingUser) end)),
+ ok = rabbit_event:notify(vhost_deleted, [{name, VHost},
+ {user_who_performed_action, ActingUser}]),
+ [case Fun() of
+ ok -> ok;
+ {error, {no_such_vhost, VHost}} -> ok
+ end || Fun <- Funs],
+ %% After vhost was deleted from mnesia DB, we try to stop vhost supervisors
+ %% on all the nodes.
+ rabbit_vhost_sup_sup:delete_on_all_nodes(VHost),
+ ok.
+
+put_vhost(Name, Description, Tags0, Trace, Username) ->
+ Tags = case Tags0 of
+ undefined -> <<"">>;
+ null -> <<"">>;
+ "undefined" -> <<"">>;
+ "null" -> <<"">>;
+ Other -> Other
+ end,
+ Result = case exists(Name) of
+ true -> ok;
+ false -> add(Name, Description, parse_tags(Tags), Username),
+ %% wait for up to 45 seconds for the vhost to initialise
+ %% on all nodes
+ case await_running_on_all_nodes(Name, 45000) of
+ ok ->
+ maybe_grant_full_permissions(Name, Username);
+ {error, timeout} ->
+ {error, timeout}
+ end
+ end,
+ case Trace of
+ true -> rabbit_trace:start(Name);
+ false -> rabbit_trace:stop(Name);
+ undefined -> ok
+ end,
+ Result.
+
+%% when definitions are loaded on boot, Username here will be ?INTERNAL_USER,
+%% which does not actually exist
+maybe_grant_full_permissions(_Name, ?INTERNAL_USER) ->
+ ok;
+maybe_grant_full_permissions(Name, Username) ->
+ U = rabbit_auth_backend_internal:lookup_user(Username),
+ maybe_grant_full_permissions(U, Name, Username).
+
+maybe_grant_full_permissions({ok, _}, Name, Username) ->
+ rabbit_auth_backend_internal:set_permissions(
+ Username, Name, <<".*">>, <<".*">>, <<".*">>, Username);
+maybe_grant_full_permissions(_, _Name, _Username) ->
+ ok.
+
+
+%% 50 ms
+-define(AWAIT_SAMPLE_INTERVAL, 50).
+
+-spec await_running_on_all_nodes(vhost:name(), integer()) -> ok | {error, timeout}.
+await_running_on_all_nodes(VHost, Timeout) ->
+ Attempts = round(Timeout / ?AWAIT_SAMPLE_INTERVAL),
+ await_running_on_all_nodes0(VHost, Attempts).
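+%% With the 45000 ms timeout used by put_vhost/5 above, this amounts to
+%% 45000 / 50 = 900 polling attempts at ?AWAIT_SAMPLE_INTERVAL intervals.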
+
+await_running_on_all_nodes0(_VHost, 0) ->
+ {error, timeout};
+await_running_on_all_nodes0(VHost, Attempts) ->
+ case is_running_on_all_nodes(VHost) of
+ true -> ok;
+ _ ->
+ timer:sleep(?AWAIT_SAMPLE_INTERVAL),
+ await_running_on_all_nodes0(VHost, Attempts - 1)
+ end.
+
+-spec is_running_on_all_nodes(vhost:name()) -> boolean().
+is_running_on_all_nodes(VHost) ->
+ States = vhost_cluster_state(VHost),
+ lists:all(fun ({_Node, State}) -> State =:= running end,
+ States).
+
+-spec vhost_cluster_state(vhost:name()) -> [{atom(), atom()}].
+vhost_cluster_state(VHost) ->
+ Nodes = rabbit_nodes:all_running(),
+ lists:map(fun(Node) ->
+ State = case rabbit_misc:rpc_call(Node,
+ rabbit_vhost_sup_sup, is_vhost_alive,
+ [VHost]) of
+ {badrpc, nodedown} -> nodedown;
+ true -> running;
+ false -> stopped
+ end,
+ {Node, State}
+ end,
+ Nodes).
+
+vhost_down(VHost) ->
+ ok = rabbit_event:notify(vhost_down,
+ [{name, VHost},
+ {node, node()},
+ {user_who_performed_action, ?INTERNAL_USER}]).
+
+delete_storage(VHost) ->
+ VhostDir = msg_store_dir_path(VHost),
+ rabbit_log:info("Deleting message store directory for vhost '~s' at '~s'~n", [VHost, VhostDir]),
+ %% Message store should be closed when vhost supervisor is closed.
+ case rabbit_file:recursive_delete([VhostDir]) of
+ ok -> ok;
+ {error, {_, enoent}} ->
+ %% a concurrent delete did the job for us
+ rabbit_log:warning("Tried to delete storage directories for vhost '~s', it failed with an ENOENT", [VHost]),
+ ok;
+ Other ->
+ rabbit_log:warning("Tried to delete storage directories for vhost '~s': ~p", [VHost, Other]),
+ Other
+ end.
+
+assert_benign(ok, _) -> ok;
+assert_benign({ok, _}, _) -> ok;
+assert_benign({ok, _, _}, _) -> ok;
+assert_benign({error, not_found}, _) -> ok;
+assert_benign({error, {absent, Q, _}}, ActingUser) ->
+ %% Removing the mnesia entries here is safe. If/when the down node
+ %% restarts, it will clear out the on-disk storage of the queue.
+ QName = amqqueue:get_name(Q),
+ rabbit_amqqueue:internal_delete(QName, ActingUser).
+
+internal_delete(VHost, ActingUser) ->
+ [ok = rabbit_auth_backend_internal:clear_permissions(
+ proplists:get_value(user, Info), VHost, ActingUser)
+ || Info <- rabbit_auth_backend_internal:list_vhost_permissions(VHost)],
+ TopicPermissions = rabbit_auth_backend_internal:list_vhost_topic_permissions(VHost),
+ [ok = rabbit_auth_backend_internal:clear_topic_permissions(
+ proplists:get_value(user, TopicPermission), VHost, ActingUser)
+ || TopicPermission <- TopicPermissions],
+ Fs1 = [rabbit_runtime_parameters:clear(VHost,
+ proplists:get_value(component, Info),
+ proplists:get_value(name, Info),
+ ActingUser)
+ || Info <- rabbit_runtime_parameters:list(VHost)],
+ Fs2 = [rabbit_policy:delete(VHost, proplists:get_value(name, Info), ActingUser)
+ || Info <- rabbit_policy:list(VHost)],
+ ok = mnesia:delete({rabbit_vhost, VHost}),
+ Fs1 ++ Fs2.
+
+-spec exists(vhost:name()) -> boolean().
+
+exists(VHost) ->
+ mnesia:dirty_read({rabbit_vhost, VHost}) /= [].
+
+-spec list_names() -> [vhost:name()].
+list_names() -> mnesia:dirty_all_keys(rabbit_vhost).
+
+%% Exists for backwards compatibility, prefer list_names/0.
+-spec list() -> [vhost:name()].
+list() -> list_names().
+
+-spec all() -> [vhost:vhost()].
+all() -> mnesia:dirty_match_object(rabbit_vhost, vhost:pattern_match_all()).
+
+-spec count() -> non_neg_integer().
+count() ->
+ length(list()).
+
+-spec with(vhost:name(), rabbit_misc:thunk(A)) -> A.
+
+with(VHost, Thunk) ->
+ fun () ->
+ case mnesia:read({rabbit_vhost, VHost}) of
+ [] ->
+ mnesia:abort({no_such_vhost, VHost});
+ [_V] ->
+ Thunk()
+ end
+ end.
+
+-spec with_user_and_vhost
+ (rabbit_types:username(), vhost:name(), rabbit_misc:thunk(A)) -> A.
+
+with_user_and_vhost(Username, VHost, Thunk) ->
+ rabbit_misc:with_user(Username, with(VHost, Thunk)).
+
+%% Like with/2 but outside an Mnesia tx
+
+-spec assert(vhost:name()) -> 'ok'.
+
+assert(VHost) -> case exists(VHost) of
+ true -> ok;
+ false -> throw({error, {no_such_vhost, VHost}})
+ end.
+
+-spec update(vhost:name(), fun((vhost:vhost()) -> vhost:vhost())) -> vhost:vhost().
+
+update(VHost, Fun) ->
+ case mnesia:read({rabbit_vhost, VHost}) of
+ [] ->
+ mnesia:abort({no_such_vhost, VHost});
+ [V] ->
+ V1 = Fun(V),
+ ok = mnesia:write(rabbit_vhost, V1, write),
+ V1
+ end.
+
+set_limits(VHost, undefined) ->
+ vhost:set_limits(VHost, []);
+set_limits(VHost, Limits) ->
+ vhost:set_limits(VHost, Limits).
+
+
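+%% The per-vhost directory name is the 128-bit MD5 digest of the vhost name
+%% rendered in base 36 (at most 25 characters), so arbitrary vhost names map
+%% to fixed-length, filesystem-safe names under msg_stores/vhosts
+%% (see msg_store_dir_base/0 below).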
+dir(Vhost) ->
+ <<Num:128>> = erlang:md5(Vhost),
+ rabbit_misc:format("~.36B", [Num]).
+
+msg_store_dir_path(VHost) ->
+ EncodedName = dir(VHost),
+ rabbit_data_coercion:to_list(filename:join([msg_store_dir_base(), EncodedName])).
+
+msg_store_dir_wildcard() ->
+ rabbit_data_coercion:to_list(filename:join([msg_store_dir_base(), "*"])).
+
+msg_store_dir_base() ->
+ Dir = rabbit_mnesia:dir(),
+ filename:join([Dir, "msg_stores", "vhosts"]).
+
+-spec trim_tag(list() | binary() | atom()) -> atom().
+trim_tag(Val) ->
+ rabbit_data_coercion:to_atom(string:trim(rabbit_data_coercion:to_list(Val))).
+
+%%----------------------------------------------------------------------------
+
+infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items].
+
+i(name, VHost) -> vhost:get_name(VHost);
+i(tracing, VHost) -> rabbit_trace:enabled(vhost:get_name(VHost));
+i(cluster_state, VHost) -> vhost_cluster_state(vhost:get_name(VHost));
+i(description, VHost) -> vhost:get_description(VHost);
+i(tags, VHost) -> vhost:get_tags(VHost);
+i(metadata, VHost) -> vhost:get_metadata(VHost);
+i(Item, VHost) ->
+ rabbit_log:error("Don't know how to compute a virtual host info item '~s' for virtual host '~p'", [Item, VHost]),
+ throw({bad_argument, Item}).
+
+-spec info(vhost:vhost() | vhost:name()) -> rabbit_types:infos().
+
+info(VHost) when ?is_vhost(VHost) ->
+ infos(?INFO_KEYS, VHost);
+info(Key) ->
+ case mnesia:dirty_read({rabbit_vhost, Key}) of
+ [] -> [];
+ [VHost] -> infos(?INFO_KEYS, VHost)
+ end.
+
+-spec info(vhost:vhost(), rabbit_types:info_keys()) -> rabbit_types:infos().
+info(VHost, Items) -> infos(Items, VHost).
+
+-spec info_all() -> [rabbit_types:infos()].
+info_all() -> info_all(?INFO_KEYS).
+
+-spec info_all(rabbit_types:info_keys()) -> [rabbit_types:infos()].
+info_all(Items) -> [info(VHost, Items) || VHost <- all()].
+
+info_all(Ref, AggregatorPid) -> info_all(?INFO_KEYS, Ref, AggregatorPid).
+
+-spec info_all(rabbit_types:info_keys(), reference(), pid()) -> 'ok'.
+info_all(Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(VHost) -> info(VHost, Items) end, all()).
diff --git a/deps/rabbit/src/rabbit_vhost_limit.erl b/deps/rabbit/src/rabbit_vhost_limit.erl
new file mode 100644
index 0000000000..bee01f3054
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_limit.erl
@@ -0,0 +1,205 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vhost_limit).
+
+-behaviour(rabbit_runtime_parameter).
+
+-include("rabbit.hrl").
+
+-export([register/0]).
+-export([parse_set/3, set/3, clear/2]).
+-export([list/0, list/1]).
+-export([update_limit/4, clear_limit/3, get_limit/2]).
+-export([validate/5, notify/5, notify_clear/4]).
+-export([connection_limit/1, queue_limit/1,
+ is_over_queue_limit/1, would_exceed_queue_limit/2,
+ is_over_connection_limit/1]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "vhost limit parameters"},
+ {mfa, {rabbit_vhost_limit, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+%%----------------------------------------------------------------------------
+
+register() ->
+ rabbit_registry:register(runtime_parameter, <<"vhost-limits">>, ?MODULE).
+
+validate(_VHost, <<"vhost-limits">>, Name, Term, _User) ->
+ rabbit_parameter_validation:proplist(
+ Name, vhost_limit_validation(), Term).
+
+notify(VHost, <<"vhost-limits">>, <<"limits">>, Limits, ActingUser) ->
+ rabbit_event:notify(vhost_limits_set, [{name, <<"limits">>},
+ {user_who_performed_action, ActingUser}
+ | Limits]),
+ update_vhost(VHost, Limits).
+
+notify_clear(VHost, <<"vhost-limits">>, <<"limits">>, ActingUser) ->
+ rabbit_event:notify(vhost_limits_cleared, [{name, <<"limits">>},
+ {user_who_performed_action, ActingUser}]),
+    %% If this function is called as part of vhost deletion, the vhost may
+    %% already be deleted.
+ case rabbit_vhost:exists(VHost) of
+ true -> update_vhost(VHost, undefined);
+ false -> ok
+ end.
+
+connection_limit(VirtualHost) ->
+ get_limit(VirtualHost, <<"max-connections">>).
+
+queue_limit(VirtualHost) ->
+ get_limit(VirtualHost, <<"max-queues">>).
+
+
+query_limits(VHost) ->
+ case rabbit_runtime_parameters:list(VHost, <<"vhost-limits">>) of
+ [] -> [];
+ Params -> [ {pget(vhost, Param), pget(value, Param)}
+ || Param <- Params,
+ pget(value, Param) =/= undefined,
+ pget(name, Param) == <<"limits">> ]
+ end.
+
+
+-spec list() -> [{vhost:name(), rabbit_types:infos()}].
+list() ->
+ query_limits('_').
+
+-spec list(vhost:name()) -> rabbit_types:infos().
+list(VHost) ->
+ case query_limits(VHost) of
+ [] -> [];
+ [{VHost, Value}] -> Value
+ end.
+
+-spec is_over_connection_limit(vhost:name()) -> {true, non_neg_integer()} | false.
+
+is_over_connection_limit(VirtualHost) ->
+ case rabbit_vhost_limit:connection_limit(VirtualHost) of
+ %% no limit configured
+ undefined -> false;
+ %% with limit = 0, no connections are allowed
+ {ok, 0} -> {true, 0};
+ {ok, Limit} when is_integer(Limit) andalso Limit > 0 ->
+ ConnectionCount =
+ rabbit_connection_tracking:count_tracked_items_in({vhost, VirtualHost}),
+ case ConnectionCount >= Limit of
+ false -> false;
+ true -> {true, Limit}
+ end;
+ %% any negative value means "no limit". Note that parameter validation
+ %% will replace negative integers with 'undefined', so this is to be
+ %% explicit and extra defensive
+ {ok, Limit} when is_integer(Limit) andalso Limit < 0 -> false;
+ %% ignore non-integer limits
+ {ok, _Limit} -> false
+ end.
+
+-spec would_exceed_queue_limit(non_neg_integer(), vhost:name()) ->
+ {true, non_neg_integer(), non_neg_integer()} | false.
+
+would_exceed_queue_limit(AdditionalCount, VirtualHost) ->
+ case queue_limit(VirtualHost) of
+ undefined ->
+ %% no limit configured
+ false;
+ {ok, 0} ->
+ %% with limit = 0, no queues can be declared (perhaps not very
+ %% useful but consistent with the connection limit)
+ {true, 0, 0};
+ {ok, Limit} when is_integer(Limit) andalso Limit > 0 ->
+ QueueCount = rabbit_amqqueue:count(VirtualHost),
+ case (AdditionalCount + QueueCount) > Limit of
+ false -> false;
+ true -> {true, Limit, QueueCount}
+ end;
+ {ok, Limit} when is_integer(Limit) andalso Limit < 0 ->
+ %% any negative value means "no limit". Note that parameter validation
+ %% will replace negative integers with 'undefined', so this is to be
+ %% explicit and extra defensive
+ false;
+ {ok, _Limit} ->
+ %% ignore non-integer limits
+ false
+ end.
+
+-spec is_over_queue_limit(vhost:name()) -> {true, non_neg_integer()} | false.
+
+is_over_queue_limit(VirtualHost) ->
+ case would_exceed_queue_limit(1, VirtualHost) of
+ {true, Limit, _QueueCount} -> {true, Limit};
+ false -> false
+ end.
+
+%%----------------------------------------------------------------------------
+
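+%% Usage sketch (the acting user name is hypothetical): the definition is a
+%% JSON object whose keys match vhost_limit_validation/0, e.g.
+%%
+%%   parse_set(<<"/">>, <<"{\"max-connections\": 100, \"max-queues\": 50}">>,
+%%             <<"admin">>).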
+parse_set(VHost, Defn, ActingUser) ->
+ Definition = rabbit_data_coercion:to_binary(Defn),
+ case rabbit_json:try_decode(Definition) of
+ {ok, Term} ->
+ set(VHost, maps:to_list(Term), ActingUser);
+ {error, Reason} ->
+ {error_string,
+ rabbit_misc:format("JSON decoding error. Reason: ~ts", [Reason])}
+ end.
+
+set(VHost, Defn, ActingUser) ->
+ rabbit_runtime_parameters:set_any(VHost, <<"vhost-limits">>,
+ <<"limits">>, Defn, ActingUser).
+
+clear(VHost, ActingUser) ->
+ rabbit_runtime_parameters:clear_any(VHost, <<"vhost-limits">>,
+ <<"limits">>, ActingUser).
+
+update_limit(VHost, Name, Value, ActingUser) ->
+ OldDef = case rabbit_runtime_parameters:list(VHost, <<"vhost-limits">>) of
+ [] -> [];
+ [Param] -> pget(value, Param, [])
+ end,
+ NewDef = [{Name, Value} | lists:keydelete(Name, 1, OldDef)],
+ set(VHost, NewDef, ActingUser).
+
+clear_limit(VHost, Name, ActingUser) ->
+ OldDef = case rabbit_runtime_parameters:list(VHost, <<"vhost-limits">>) of
+ [] -> [];
+ [Param] -> pget(value, Param, [])
+ end,
+ NewDef = lists:keydelete(Name, 1, OldDef),
+ set(VHost, NewDef, ActingUser).
+
+vhost_limit_validation() ->
+ [{<<"max-connections">>, fun rabbit_parameter_validation:integer/2, optional},
+ {<<"max-queues">>, fun rabbit_parameter_validation:integer/2, optional}].
+
+update_vhost(VHostName, Limits) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ rabbit_vhost:update(VHostName,
+ fun(VHost) ->
+ rabbit_vhost:set_limits(VHost, Limits)
+ end)
+ end),
+ ok.
+
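+%% For instance (illustrative), with {"max-connections": 100} set on a
+%% vhost, get_limit(VHost, <<"max-connections">>) returns {ok, 100}; with no
+%% limits configured, or a negative value, it returns undefined.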
+get_limit(VirtualHost, Limit) ->
+ case rabbit_runtime_parameters:list(VirtualHost, <<"vhost-limits">>) of
+ [] -> undefined;
+ [Param] -> case pget(value, Param) of
+ undefined -> undefined;
+ Val -> case pget(Limit, Val) of
+ undefined -> undefined;
+ %% no limit
+ N when N < 0 -> undefined;
+ N when N >= 0 -> {ok, N}
+ end
+ end
+ end.
diff --git a/deps/rabbit/src/rabbit_vhost_msg_store.erl b/deps/rabbit/src/rabbit_vhost_msg_store.erl
new file mode 100644
index 0000000000..8667b4d143
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_msg_store.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vhost_msg_store).
+
+-include("rabbit.hrl").
+
+-export([start/4, stop/2, client_init/5, successfully_recovered_state/2]).
+-export([vhost_store_pid/2]).
+
+start(VHost, Type, ClientRefs, StartupFunState) when is_list(ClientRefs);
+ ClientRefs == undefined ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ supervisor2:start_child(VHostSup,
+ {Type, {rabbit_msg_store, start_link,
+ [Type, VHostDir, ClientRefs, StartupFunState]},
+ transient, ?MSG_STORE_WORKER_WAIT, worker, [rabbit_msg_store]});
+ %% we can get here if a vhost is added and removed concurrently
+ %% e.g. some integration tests do it
+ {error, {no_such_vhost, VHost}} = E ->
+ rabbit_log:error("Failed to start a message store for vhost ~s: vhost no longer exists!",
+ [VHost]),
+ E
+ end.
+
+stop(VHost, Type) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ ok = supervisor2:terminate_child(VHostSup, Type),
+ ok = supervisor2:delete_child(VHostSup, Type);
+ %% see start/4
+ {error, {no_such_vhost, VHost}} ->
+ rabbit_log:error("Failed to stop a message store for vhost ~s: vhost no longer exists!",
+                             [VHost]),
+            ok
+ end.
+
+client_init(VHost, Type, Ref, MsgOnDiskFun, CloseFDsFun) ->
+ with_vhost_store(VHost, Type, fun(StorePid) ->
+ rabbit_msg_store:client_init(StorePid, Ref, MsgOnDiskFun, CloseFDsFun)
+ end).
+
+with_vhost_store(VHost, Type, Fun) ->
+ case vhost_store_pid(VHost, Type) of
+ no_pid ->
+ throw({message_store_not_started, Type, VHost});
+ Pid when is_pid(Pid) ->
+ Fun(Pid)
+ end.
+
+vhost_store_pid(VHost, Type) ->
+ {ok, VHostSup} = rabbit_vhost_sup_sup:get_vhost_sup(VHost),
+ case supervisor2:find_child(VHostSup, Type) of
+ [Pid] -> Pid;
+ [] -> no_pid
+ end.
+
+successfully_recovered_state(VHost, Type) ->
+ with_vhost_store(VHost, Type, fun(StorePid) ->
+ rabbit_msg_store:successfully_recovered_state(StorePid)
+ end).
diff --git a/deps/rabbit/src/rabbit_vhost_process.erl b/deps/rabbit/src/rabbit_vhost_process.erl
new file mode 100644
index 0000000000..cf70d49010
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_process.erl
@@ -0,0 +1,96 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This module implements a vhost identity process.
+
+%% On start this process will try to recover the vhost data and
+%% process structure (queues and message stores).
+%% If recovery succeeds, the process saves its PID to the vhost process
+%% registry. If the vhost process PID is in the registry and the process
+%% is alive, the vhost is considered running.
+
+%% On termination, the process will notify that the vhost is going down.
+
+%% The process also periodically checks whether the vhost is still
+%% present in the Mnesia DB and stops the vhost supervision tree when it
+%% disappears.
+
+-module(rabbit_vhost_process).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+-include("rabbit.hrl").
+
+-define(TICKTIME_RATIO, 4).
+
+-behaviour(gen_server2).
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+start_link(VHost) ->
+ gen_server2:start_link(?MODULE, [VHost], []).
+
+
+init([VHost]) ->
+ process_flag(trap_exit, true),
+ rabbit_log:debug("Recovering data for VHost ~p~n", [VHost]),
+ try
+ %% Recover the vhost data and save it to vhost registry.
+ ok = rabbit_vhost:recover(VHost),
+ rabbit_vhost_sup_sup:save_vhost_process(VHost, self()),
+ Interval = interval(),
+ timer:send_interval(Interval, check_vhost),
+ true = erlang:garbage_collect(),
+ {ok, VHost}
+ catch _:Reason:Stacktrace ->
+ rabbit_amqqueue:mark_local_durable_queues_stopped(VHost),
+ rabbit_log:error("Unable to recover vhost ~p data. Reason ~p~n"
+ " Stacktrace ~p",
+ [VHost, Reason, Stacktrace]),
+ {stop, Reason}
+ end.
+
+handle_call(_,_,VHost) ->
+ {reply, ok, VHost}.
+
+handle_cast(_, VHost) ->
+ {noreply, VHost}.
+
+handle_info(check_vhost, VHost) ->
+ case rabbit_vhost:exists(VHost) of
+ true -> {noreply, VHost};
+ false ->
+ rabbit_log:warning("Virtual host '~s' is gone. "
+ "Stopping its top level supervisor.",
+ [VHost]),
+ %% Stop vhost's top supervisor in a one-off process to avoid a deadlock:
+ %% us (a child process) waiting for supervisor shutdown and our supervisor(s)
+ %% waiting for us to shutdown.
+ spawn(
+ fun() ->
+ rabbit_vhost_sup_sup:stop_and_delete_vhost(VHost)
+ end),
+ {noreply, VHost}
+ end;
+handle_info(_, VHost) ->
+ {noreply, VHost}.
+
+terminate(shutdown, VHost) ->
+ %% Notify that vhost is stopped.
+ rabbit_vhost:vhost_down(VHost);
+terminate(_, _VHost) ->
+ ok.
+
+code_change(_OldVsn, VHost, _Extra) ->
+ {ok, VHost}.
+
+interval() ->
+ application:get_env(kernel, net_ticktime, 60000) * ?TICKTIME_RATIO.
diff --git a/deps/rabbit/src/rabbit_vhost_sup.erl b/deps/rabbit/src/rabbit_vhost_sup.erl
new file mode 100644
index 0000000000..d82d827ecf
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_sup.erl
@@ -0,0 +1,22 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vhost_sup).
+
+-include("rabbit.hrl").
+
+%% Each vhost gets an instance of this supervisor that supervises
+%% message stores and queues (via rabbit_amqqueue_sup_sup).
+-behaviour(supervisor2).
+-export([init/1]).
+-export([start_link/1]).
+
+start_link(VHost) ->
+ supervisor2:start_link(?MODULE, [VHost]).
+
+init([_VHost]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
diff --git a/deps/rabbit/src/rabbit_vhost_sup_sup.erl b/deps/rabbit/src/rabbit_vhost_sup_sup.erl
new file mode 100644
index 0000000000..c201237daa
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_sup_sup.erl
@@ -0,0 +1,271 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vhost_sup_sup).
+
+-include("rabbit.hrl").
+
+-behaviour(supervisor2).
+
+-export([init/1]).
+
+-export([start_link/0, start/0]).
+-export([init_vhost/1,
+ start_vhost/1, start_vhost/2,
+ get_vhost_sup/1, get_vhost_sup/2,
+ save_vhost_sup/3,
+ save_vhost_process/2]).
+-export([delete_on_all_nodes/1, start_on_all_nodes/1]).
+-export([is_vhost_alive/1]).
+-export([check/0]).
+
+%% Internal
+-export([stop_and_delete_vhost/1]).
+
+-record(vhost_sup, {vhost, vhost_sup_pid, wrapper_pid, vhost_process_pid}).
+
+start() ->
+ case supervisor:start_child(rabbit_sup, {?MODULE,
+ {?MODULE, start_link, []},
+ permanent, infinity, supervisor,
+ [?MODULE]}) of
+ {ok, _} -> ok;
+ {error, Err} -> {error, Err}
+ end.
+
+start_link() ->
+ supervisor2:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ %% This assumes that a single vhost termination should not shut down nodes
+ %% unless the operator opts in.
+ RestartStrategy = vhost_restart_strategy(),
+ ets:new(?MODULE, [named_table, public, {keypos, #vhost_sup.vhost}]),
+ {ok, {{simple_one_for_one, 0, 5},
+ [{rabbit_vhost, {rabbit_vhost_sup_wrapper, start_link, []},
+ RestartStrategy, ?SUPERVISOR_WAIT, supervisor,
+ [rabbit_vhost_sup_wrapper, rabbit_vhost_sup]}]}}.
+
+start_on_all_nodes(VHost) ->
+ %% Do not try to start a vhost on booting peer nodes
+ AllBooted = [Node || Node <- rabbit_nodes:all_running(), rabbit:is_booted(Node)],
+ Nodes = [node() | AllBooted],
+ Results = [{Node, start_vhost(VHost, Node)} || Node <- Nodes],
+ Failures = lists:filter(fun
+ ({_, {ok, _}}) -> false;
+ ({_, {error, {already_started, _}}}) -> false;
+ (_) -> true
+ end,
+ Results),
+ case Failures of
+ [] -> ok;
+ Errors -> {error, {failed_to_start_vhost_on_nodes, Errors}}
+ end.
+
+delete_on_all_nodes(VHost) ->
+ [ stop_and_delete_vhost(VHost, Node) || Node <- rabbit_nodes:all_running() ],
+ ok.
+
+stop_and_delete_vhost(VHost) ->
+ StopResult = case lookup_vhost_sup_record(VHost) of
+ not_found -> ok;
+ #vhost_sup{wrapper_pid = WrapperPid,
+ vhost_sup_pid = VHostSupPid} ->
+ case is_process_alive(WrapperPid) of
+ false -> ok;
+ true ->
+ rabbit_log:info("Stopping vhost supervisor ~p"
+ " for vhost '~s'~n",
+ [VHostSupPid, VHost]),
+ case supervisor2:terminate_child(?MODULE, WrapperPid) of
+ ok ->
+ true = ets:delete(?MODULE, VHost),
+ ok;
+ Other ->
+ Other
+ end
+ end
+ end,
+ ok = rabbit_vhost:delete_storage(VHost),
+ StopResult.
+
+%% We take an optimistic approach when stopping a remote VHost supervisor.
+stop_and_delete_vhost(VHost, Node) when Node == node(self()) ->
+ stop_and_delete_vhost(VHost);
+stop_and_delete_vhost(VHost, Node) ->
+ case rabbit_misc:rpc_call(Node, rabbit_vhost_sup_sup, stop_and_delete_vhost, [VHost]) of
+ ok -> ok;
+ {badrpc, RpcErr} ->
+ rabbit_log:error("Failed to stop and delete a vhost ~p"
+ " on node ~p."
+ " Reason: ~p",
+ [VHost, Node, RpcErr]),
+ {error, RpcErr}
+ end.
+
+-spec init_vhost(rabbit_types:vhost()) -> ok | {error, {no_such_vhost, rabbit_types:vhost()}}.
+init_vhost(VHost) ->
+ case start_vhost(VHost) of
+ {ok, _} -> ok;
+ {error, {already_started, _}} ->
+ rabbit_log:warning(
+ "Attempting to start an already started vhost '~s'.",
+ [VHost]),
+ ok;
+ {error, {no_such_vhost, VHost}} ->
+ {error, {no_such_vhost, VHost}};
+ {error, Reason} ->
+ case vhost_restart_strategy() of
+ permanent ->
+ rabbit_log:error(
+ "Unable to initialize vhost data store for vhost '~s'."
+ " Reason: ~p",
+ [VHost, Reason]),
+ throw({error, Reason});
+ transient ->
+ rabbit_log:warning(
+ "Unable to initialize vhost data store for vhost '~s'."
+ " The vhost will be stopped for this node. "
+ " Reason: ~p",
+ [VHost, Reason]),
+ ok
+ end
+ end.
+
+-type vhost_error() :: {no_such_vhost, rabbit_types:vhost()} |
+ {vhost_supervisor_not_running, rabbit_types:vhost()}.
+
+-spec get_vhost_sup(rabbit_types:vhost(), node()) -> {ok, pid()} | {error, vhost_error() | term()}.
+get_vhost_sup(VHost, Node) ->
+ case rabbit_misc:rpc_call(Node, rabbit_vhost_sup_sup, get_vhost_sup, [VHost]) of
+ {ok, Pid} when is_pid(Pid) ->
+ {ok, Pid};
+ {error, Err} ->
+ {error, Err};
+ {badrpc, RpcErr} ->
+ {error, RpcErr}
+ end.
+
+-spec get_vhost_sup(rabbit_types:vhost()) -> {ok, pid()} | {error, vhost_error()}.
+get_vhost_sup(VHost) ->
+ case rabbit_vhost:exists(VHost) of
+ false ->
+ {error, {no_such_vhost, VHost}};
+ true ->
+ case vhost_sup_pid(VHost) of
+ no_pid ->
+ {error, {vhost_supervisor_not_running, VHost}};
+ {ok, Pid} when is_pid(Pid) ->
+ {ok, Pid}
+ end
+ end.
+
+-spec start_vhost(rabbit_types:vhost(), node()) -> {ok, pid()} | {error, term()}.
+start_vhost(VHost, Node) ->
+ case rabbit_misc:rpc_call(Node, rabbit_vhost_sup_sup, start_vhost, [VHost]) of
+ {ok, Pid} -> {ok, Pid};
+ {error, Err} -> {error, Err};
+ {badrpc, RpcErr} -> {error, RpcErr}
+ end.
+
+-spec start_vhost(rabbit_types:vhost()) -> {ok, pid()} | {error, term()}.
+start_vhost(VHost) ->
+ case rabbit_vhost:exists(VHost) of
+ false -> {error, {no_such_vhost, VHost}};
+ true ->
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ supervisor2:start_child(?MODULE, [VHost]);
+ undefined ->
+ {error, rabbit_vhost_sup_sup_not_running}
+ end
+ end.
+
+-spec is_vhost_alive(rabbit_types:vhost()) -> boolean().
+is_vhost_alive(VHost) ->
+%% A vhost is considered alive if its supervision tree is alive and
+%% it is recorded in the ETS table.
+ case lookup_vhost_sup_record(VHost) of
+ #vhost_sup{wrapper_pid = WrapperPid,
+ vhost_sup_pid = VHostSupPid,
+ vhost_process_pid = VHostProcessPid}
+ when is_pid(WrapperPid),
+ is_pid(VHostSupPid),
+ is_pid(VHostProcessPid) ->
+ is_process_alive(WrapperPid)
+ andalso
+ is_process_alive(VHostSupPid)
+ andalso
+ is_process_alive(VHostProcessPid);
+ _ -> false
+ end.
+
+
+-spec save_vhost_sup(rabbit_types:vhost(), pid(), pid()) -> ok.
+save_vhost_sup(VHost, WrapperPid, VHostPid) ->
+ true = ets:insert(?MODULE, #vhost_sup{vhost = VHost,
+ vhost_sup_pid = VHostPid,
+ wrapper_pid = WrapperPid}),
+ ok.
+
+-spec save_vhost_process(rabbit_types:vhost(), pid()) -> ok.
+save_vhost_process(VHost, VHostProcessPid) ->
+ true = ets:update_element(?MODULE, VHost,
+ {#vhost_sup.vhost_process_pid, VHostProcessPid}),
+ ok.
+
+-spec lookup_vhost_sup_record(rabbit_types:vhost()) -> #vhost_sup{} | not_found.
+lookup_vhost_sup_record(VHost) ->
+ case ets:info(?MODULE, name) of
+ ?MODULE ->
+ case ets:lookup(?MODULE, VHost) of
+ [] -> not_found;
+ [#vhost_sup{} = VHostSup] -> VHostSup
+ end;
+ undefined -> not_found
+ end.
+
+-spec vhost_sup_pid(rabbit_types:vhost()) -> no_pid | {ok, pid()}.
+vhost_sup_pid(VHost) ->
+ case lookup_vhost_sup_record(VHost) of
+ not_found ->
+ no_pid;
+ #vhost_sup{vhost_sup_pid = Pid} = VHostSup ->
+ case erlang:is_process_alive(Pid) of
+ true -> {ok, Pid};
+ false ->
+ ets:delete_object(?MODULE, VHostSup),
+ no_pid
+ end
+ end.
+
+vhost_restart_strategy() ->
+ %% This assumes that a single vhost termination should not shut down nodes
+ %% unless the operator opts in.
+ case application:get_env(rabbit, vhost_restart_strategy, continue) of
+ continue -> transient;
+ stop_node -> permanent;
+ transient -> transient;
+ permanent -> permanent
+ end.
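+
+%% A minimal sketch of opting in to the stricter behaviour via
+%% advanced.config (assuming the standard Erlang app config format;
+%% the default, 'continue', keeps vhost supervisors transient):
+%%
+%%   [
+%%    {rabbit, [
+%%      %% take the whole node down if a vhost supervision tree gives up
+%%      {vhost_restart_strategy, stop_node}
+%%    ]}
+%%   ].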
+
+check() ->
+ VHosts = rabbit_vhost:list_names(),
+ lists:filter(
+ fun(V) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(V) of
+ {ok, Sup} ->
+ MsgStores = [Pid || {Name, Pid, _, _} <- supervisor:which_children(Sup),
+ lists:member(Name, [msg_store_persistent,
+ msg_store_transient])],
+ not is_vhost_alive(V) orelse (not lists:all(fun(P) ->
+ erlang:is_process_alive(P)
+ end, MsgStores));
+ {error, _} ->
+ true
+ end
+ end, VHosts).
diff --git a/deps/rabbit/src/rabbit_vhost_sup_wrapper.erl b/deps/rabbit/src/rabbit_vhost_sup_wrapper.erl
new file mode 100644
index 0000000000..ed239ade69
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_sup_wrapper.erl
@@ -0,0 +1,57 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This module is a wrapper around the vhost supervisor that provides
+%% exactly-once restart semantics.
+
+-module(rabbit_vhost_sup_wrapper).
+
+-include("rabbit.hrl").
+
+-behaviour(supervisor2).
+-export([init/1]).
+-export([start_link/1]).
+-export([start_vhost_sup/1]).
+
+start_link(VHost) ->
+ %% We use the stock 'supervisor' module here because supervisor2 does not
+ %% stop an already-started child when another child fails to start
+ %% (possibly a bug in supervisor2).
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, Pid} ->
+ {error, {already_started, Pid}};
+ {error, _} ->
+ supervisor:start_link(?MODULE, [VHost])
+ end.
+
+init([VHost]) ->
+ %% 2 restarts in 5 minutes. One per message store.
+ {ok, {{one_for_all, 2, 300},
+ [
+ %% rabbit_vhost_sup is an empty supervisor container for
+ %% all data processes.
+ {rabbit_vhost_sup,
+ {rabbit_vhost_sup_wrapper, start_vhost_sup, [VHost]},
+ permanent, infinity, supervisor,
+ [rabbit_vhost_sup]},
+ %% rabbit_vhost_process is a vhost identity process, which
+ %% is responsible for data recovery and vhost aliveness status.
+ %% See the module comments for more info.
+ {rabbit_vhost_process,
+ {rabbit_vhost_process, start_link, [VHost]},
+ permanent, ?WORKER_WAIT, worker,
+ [rabbit_vhost_process]}]}}.
+
+
+start_vhost_sup(VHost) ->
+ case rabbit_vhost_sup:start_link(VHost) of
+ {ok, Pid} ->
+ %% Save vhost sup record with wrapper pid and vhost sup pid.
+ ok = rabbit_vhost_sup_sup:save_vhost_sup(VHost, self(), Pid),
+ {ok, Pid};
+ Other ->
+ Other
+ end.
diff --git a/deps/rabbit/src/rabbit_vm.erl b/deps/rabbit/src/rabbit_vm.erl
new file mode 100644
index 0000000000..b014e090c5
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vm.erl
@@ -0,0 +1,427 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vm).
+
+-export([memory/0, binary/0, ets_tables_memory/1]).
+
+-define(MAGIC_PLUGINS, ["cowboy", "ranch", "sockjs"]).
+
+%%----------------------------------------------------------------------------
+
+-spec memory() -> rabbit_types:infos().
+
+memory() ->
+ All = interesting_sups(),
+ {Sums, _Other} = sum_processes(
+ lists:append(All), distinguishers(), [memory]),
+
+ [Qs, QsSlave, Qqs, Ssqs, Srqs, SCoor, ConnsReader, ConnsWriter, ConnsChannel,
+ ConnsOther, MsgIndexProc, MgmtDbProc, Plugins] =
+ [aggregate(Names, Sums, memory, fun (X) -> X end)
+ || Names <- distinguished_interesting_sups()],
+
+ MnesiaETS = mnesia_memory(),
+ MsgIndexETS = ets_memory(msg_stores()),
+ MetricsETS = ets_memory([rabbit_metrics]),
+ QuorumETS = ets_memory([ra_log_ets]),
+ MetricsProc = try
+ [{_, M}] = process_info(whereis(rabbit_metrics), [memory]),
+ M
+ catch
+ error:badarg ->
+ 0
+ end,
+ MgmtDbETS = ets_memory([rabbit_mgmt_storage]),
+ [{total, ErlangTotal},
+ {processes, Processes},
+ {ets, ETS},
+ {atom, Atom},
+ {binary, Bin},
+ {code, Code},
+ {system, System}] =
+ erlang:memory([total, processes, ets, atom, binary, code, system]),
+
+ Strategy = vm_memory_monitor:get_memory_calculation_strategy(),
+ Allocated = recon_alloc:memory(allocated),
+ Rss = vm_memory_monitor:get_rss_memory(),
+
+ AllocatedUnused = max(Allocated - ErlangTotal, 0),
+ OSReserved = max(Rss - Allocated, 0),
+
+ OtherProc = Processes
+ - ConnsReader - ConnsWriter - ConnsChannel - ConnsOther
+ - Qs - QsSlave - Qqs - Ssqs - Srqs - SCoor - MsgIndexProc - Plugins
+ - MgmtDbProc - MetricsProc,
+
+ [
+ %% Connections
+ {connection_readers, ConnsReader},
+ {connection_writers, ConnsWriter},
+ {connection_channels, ConnsChannel},
+ {connection_other, ConnsOther},
+
+ %% Queues
+ {queue_procs, Qs},
+ {queue_slave_procs, QsSlave},
+ {quorum_queue_procs, Qqs},
+ {stream_queue_procs, Ssqs},
+ {stream_queue_replica_reader_procs, Srqs},
+ {stream_queue_coordinator_procs, SCoor},
+
+ %% Processes
+ {plugins, Plugins},
+ {other_proc, lists:max([0, OtherProc])}, %% [1]
+
+ %% Metrics
+ {metrics, MetricsETS + MetricsProc},
+ {mgmt_db, MgmtDbETS + MgmtDbProc},
+
+ %% ETS
+ {mnesia, MnesiaETS},
+ {quorum_ets, QuorumETS},
+ {other_ets, ETS - MnesiaETS - MetricsETS - MgmtDbETS - MsgIndexETS - QuorumETS},
+
+ %% Messages (mostly, some binaries are not messages)
+ {binary, Bin},
+ {msg_index, MsgIndexETS + MsgIndexProc},
+
+ %% System
+ {code, Code},
+ {atom, Atom},
+ {other_system, System - ETS - Bin - Code - Atom},
+ {allocated_unused, AllocatedUnused},
+ {reserved_unallocated, OSReserved},
+ {strategy, Strategy},
+ {total, [{erlang, ErlangTotal},
+ {rss, Rss},
+ {allocated, Allocated}]}
+ ].
+%% [1] - erlang:memory(processes) can be less than the sum of its
+%% parts. Rather than display something nonsensical, just silence any
+%% claims about negative memory. See
+%% http://erlang.org/pipermail/erlang-questions/2012-September/069320.html
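+%%
+%% A sketch of the shape of the result (keys mirror the proplist built
+%% above; the values are placeholders, not real measurements):
+%%
+%%   > rabbit_vm:memory().
+%%   [{connection_readers, ...},
+%%    {connection_writers, ...},
+%%    ...
+%%    {strategy, ...},
+%%    {total, [{erlang, ...}, {rss, ...}, {allocated, ...}]}]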
+
+-spec binary() -> rabbit_types:infos().
+
+binary() ->
+ All = interesting_sups(),
+ {Sums, Rest} =
+ sum_processes(
+ lists:append(All),
+ fun (binary, Info, Acc) ->
+ lists:foldl(fun ({Ptr, Sz, _RefCnt}, Acc0) ->
+ sets:add_element({Ptr, Sz}, Acc0)
+ end, Acc, Info)
+ end, distinguishers(), [{binary, sets:new()}]),
+ [Other, Qs, QsSlave, Qqs, Ssqs, Srqs, Scoor, ConnsReader, ConnsWriter,
+ ConnsChannel, ConnsOther, MsgIndexProc, MgmtDbProc, Plugins] =
+ [aggregate(Names, [{other, Rest} | Sums], binary, fun sum_binary/1)
+ || Names <- [[other] | distinguished_interesting_sups()]],
+ [{connection_readers, ConnsReader},
+ {connection_writers, ConnsWriter},
+ {connection_channels, ConnsChannel},
+ {connection_other, ConnsOther},
+ {queue_procs, Qs},
+ {queue_slave_procs, QsSlave},
+ {quorum_queue_procs, Qqs},
+ {stream_queue_procs, Ssqs},
+ {stream_queue_replica_reader_procs, Srqs},
+ {stream_queue_coordinator_procs, Scoor},
+ {plugins, Plugins},
+ {mgmt_db, MgmtDbProc},
+ {msg_index, MsgIndexProc},
+ {other, Other}].
+
+%%----------------------------------------------------------------------------
+
+mnesia_memory() ->
+ case mnesia:system_info(is_running) of
+ yes -> lists:sum([bytes(mnesia:table_info(Tab, memory)) ||
+ Tab <- mnesia:system_info(tables)]);
+ _ -> 0
+ end.
+
+ets_memory(Owners) ->
+ lists:sum([V || {_K, V} <- ets_tables_memory(Owners)]).
+
+-spec ets_tables_memory(Owners) -> rabbit_types:infos()
+ when Owners :: all | OwnerProcessName | [OwnerProcessName],
+ OwnerProcessName :: atom().
+
+ets_tables_memory(all) ->
+ [{ets:info(T, name), bytes(ets:info(T, memory))}
+ || T <- ets:all(),
+ is_atom(T)];
+ets_tables_memory(OwnerName) when is_atom(OwnerName) ->
+ ets_tables_memory([OwnerName]);
+ets_tables_memory(Owners) when is_list(Owners) ->
+ OwnerPids = lists:map(fun(O) when is_pid(O) -> O;
+ (O) when is_atom(O) -> whereis(O)
+ end,
+ Owners),
+ [{ets:info(T, name), bytes(ets:info(T, memory))}
+ || T <- ets:all(),
+ lists:member(ets:info(T, owner), OwnerPids)].
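+
+%% Illustrative use (table names and sizes depend on the running node):
+%%   rabbit_vm:ets_tables_memory([rabbit_metrics]).
+%%   %% => [{TableName, Bytes}, ...] for tables owned by rabbit_metrics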
+
+bytes(Words) -> try
+ Words * erlang:system_info(wordsize)
+ catch
+ _:_ -> 0
+ end.
+
+interesting_sups() ->
+ [queue_sups(), quorum_sups(), stream_server_sups(), stream_reader_sups(),
+ conn_sups() | interesting_sups0()].
+
+queue_sups() ->
+ all_vhosts_children(rabbit_amqqueue_sup_sup).
+
+quorum_sups() ->
+ %% TODO: in the future not all ra servers may be queues and we will need
+ %% some way to filter them out
+ case whereis(ra_server_sup_sup) of
+ undefined ->
+ [];
+ _ ->
+ [Pid || {_, Pid, _, _} <-
+ supervisor:which_children(ra_server_sup_sup)]
+ end.
+
+stream_server_sups() -> [osiris_server_sup].
+stream_reader_sups() -> [osiris_replica_reader_sup].
+
+msg_stores() ->
+ all_vhosts_children(msg_store_transient)
+ ++
+ all_vhosts_children(msg_store_persistent).
+
+all_vhosts_children(Name) ->
+ case whereis(rabbit_vhost_sup_sup) of
+ undefined -> [];
+ Pid when is_pid(Pid) ->
+ lists:filtermap(
+ fun({_, VHostSupWrapper, _, _}) ->
+ case supervisor2:find_child(VHostSupWrapper,
+ rabbit_vhost_sup) of
+ [] -> false;
+ [VHostSup] ->
+ case supervisor2:find_child(VHostSup, Name) of
+ [QSup] -> {true, QSup};
+ [] -> false
+ end
+ end
+ end,
+ supervisor:which_children(rabbit_vhost_sup_sup))
+ end.
+
+interesting_sups0() ->
+ MsgIndexProcs = msg_stores(),
+ MgmtDbProcs = [rabbit_mgmt_sup_sup],
+ PluginProcs = plugin_sups(),
+ [MsgIndexProcs, MgmtDbProcs, PluginProcs].
+
+conn_sups() ->
+ Ranches = lists:flatten(ranch_server_sups()),
+ [amqp_sup|Ranches].
+
+ranch_server_sups() ->
+ try
+ ets:match(ranch_server, {{conns_sup, '_'}, '$1'})
+ catch
+ %% Ranch ETS table doesn't exist yet
+ error:badarg -> []
+ end.
+
+with(Sups, With) -> [{Sup, With} || Sup <- Sups].
+
+distinguishers() -> with(queue_sups(), fun queue_type/1) ++
+ with(conn_sups(), fun conn_type/1) ++
+ with(quorum_sups(), fun ra_type/1).
+
+distinguished_interesting_sups() ->
+ [
+ with(queue_sups(), master),
+ with(queue_sups(), slave),
+ with(quorum_sups(), quorum),
+ stream_server_sups(),
+ stream_reader_sups(),
+ with(quorum_sups(), stream),
+ with(conn_sups(), reader),
+ with(conn_sups(), writer),
+ with(conn_sups(), channel),
+ with(conn_sups(), other)]
+ ++ interesting_sups0().
+
+plugin_sups() ->
+ lists:append([plugin_sup(App) ||
+ {App, _, _} <- rabbit_misc:which_applications(),
+ is_plugin(atom_to_list(App))]).
+
+plugin_sup(App) ->
+ case application_controller:get_master(App) of
+ undefined -> [];
+ Master -> case application_master:get_child(Master) of
+ {Pid, _} when is_pid(Pid) -> [process_name(Pid)];
+ Pid when is_pid(Pid) -> [process_name(Pid)];
+ _ -> []
+ end
+ end.
+
+process_name(Pid) ->
+ case process_info(Pid, registered_name) of
+ {registered_name, Name} -> Name;
+ _ -> Pid
+ end.
+
+is_plugin("rabbitmq_" ++ _) -> true;
+is_plugin(App) -> lists:member(App, ?MAGIC_PLUGINS).
+
+aggregate(Names, Sums, Key, Fun) ->
+ lists:sum([extract(Name, Sums, Key, Fun) || Name <- Names]).
+
+extract(Name, Sums, Key, Fun) ->
+ case keyfind(Name, Sums) of
+ {value, Accs} -> Fun(keyfetch(Key, Accs));
+ false -> 0
+ end.
+
+sum_binary(Set) ->
+ sets:fold(fun({_Pt, Sz}, Acc) -> Acc + Sz end, 0, Set).
+
+queue_type(PDict) ->
+ case keyfind(process_name, PDict) of
+ {value, {rabbit_mirror_queue_slave, _}} -> slave;
+ _ -> master
+ end.
+
+conn_type(PDict) ->
+ case keyfind(process_name, PDict) of
+ {value, {rabbit_reader, _}} -> reader;
+ {value, {rabbit_writer, _}} -> writer;
+ {value, {rabbit_channel, _}} -> channel;
+ _ -> other
+ end.
+
+ra_type(PDict) ->
+ case keyfind('$rabbit_vm_category', PDict) of
+ {value, rabbit_stream_coordinator} -> stream;
+ _ -> quorum
+ end.
+
+%%----------------------------------------------------------------------------
+
+%% NB: this code is non-rabbit specific.
+
+-type process() :: pid() | atom().
+-type info_key() :: atom().
+-type info_value() :: any().
+-type info_item() :: {info_key(), info_value()}.
+-type accumulate() :: fun ((info_key(), info_value(), info_value()) ->
+ info_value()).
+-type distinguisher() :: fun (([{term(), term()}]) -> atom()).
+-type distinguishers() :: [{info_key(), distinguisher()}].
+-spec sum_processes([process()], distinguishers(), [info_key()]) ->
+ {[{process(), [info_item()]}], [info_item()]}.
+-spec sum_processes([process()], accumulate(), distinguishers(),
+ [info_item()]) ->
+ {[{process(), [info_item()]}], [info_item()]}.
+
+sum_processes(Names, Distinguishers, Items) ->
+ sum_processes(Names, fun (_, X, Y) -> X + Y end, Distinguishers,
+ [{Item, 0} || Item <- Items]).
+
+%% summarize the process_info of all processes based on their
+%% '$ancestor' hierarchy, recorded in their process dictionary.
+%%
+%% The function takes
+%%
+%% 1) a list of names/pids of processes that are accumulation points
+%% in the hierarchy.
+%%
+%% 2) a function that aggregates individual info items, taking the
+%% info item key, value and accumulated value as input and
+%% producing a new accumulated value.
+%%
+%% 3) a list of info item key / initial accumulator value pairs.
+%%
+%% The process_info of a process is accumulated at the nearest of its
+%% ancestors that is mentioned in the first argument, or, if no such
+%% ancestor exists or the ancestor information is absent, in a special
+%% 'other' bucket.
+%%
+%% The result is a pair consisting of
+%%
+%% 1) a k/v list, containing for each of the accumulation names/pids a
+%% list of info items, containing the accumulated data, and
+%%
+%% 2) the 'other' bucket - a list of info items containing the
+%% accumulated data of all processes with no matching ancestors
+%%
+%% Note that this function operates on names as well as pids, but
+%% these must match whatever is contained in the '$ancestor' process
+%% dictionary entry. Generally that means for all registered processes
+%% the name should be used.
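+%%
+%% A minimal, illustrative call (the supervisor names are assumptions
+%% picked for the example): accumulate memory per supervisor, splitting
+%% queue processes by master/slave role via a distinguisher fun.
+%%
+%%   sum_processes([rabbit_amqqueue_sup_sup, rabbit_mgmt_sup_sup],
+%%                 [{rabbit_amqqueue_sup_sup, fun queue_type/1}],
+%%                 [memory]).
+%%   %% => {[{{rabbit_amqqueue_sup_sup, master}, [{memory, ...}]},
+%%   %%      {rabbit_mgmt_sup_sup, [{memory, ...}]}],
+%%   %%     [{memory, ...}]}        %% the 'other' bucket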
+sum_processes(Names, Fun, Distinguishers, Acc0) ->
+ Items = [Item || {Item, _Blank0} <- Acc0],
+ {NameAccs, OtherAcc} =
+ lists:foldl(
+ fun (Pid, Acc) ->
+ InfoItems = [registered_name, dictionary | Items],
+ case process_info(Pid, InfoItems) of
+ undefined ->
+ Acc;
+ [{registered_name, RegName}, {dictionary, D} | Vals] ->
+ %% see docs for process_info/2 for the
+ %% special handling of 'registered_name'
+ %% info items
+ Extra = case RegName of
+ [] -> [];
+ N -> [N]
+ end,
+ Name0 = find_ancestor(Extra, D, Names),
+ Name = case keyfind(Name0, Distinguishers) of
+ {value, DistFun} -> {Name0, DistFun(D)};
+ false -> Name0
+ end,
+ accumulate(
+ Name, Fun, orddict:from_list(Vals), Acc, Acc0)
+ end
+ end, {orddict:new(), Acc0}, processes()),
+ %% these conversions aren't strictly necessary; we do them simply
+ %% for the sake of encapsulating the representation.
+ {[{Name, orddict:to_list(Accs)} ||
+ {Name, Accs} <- orddict:to_list(NameAccs)],
+ orddict:to_list(OtherAcc)}.
+
+find_ancestor(Extra, D, Names) ->
+ Ancestors = case keyfind('$ancestors', D) of
+ {value, Ancs} -> Ancs;
+ false -> []
+ end,
+ case lists:splitwith(fun (A) -> not lists:member(A, Names) end,
+ Extra ++ Ancestors) of
+ {_, []} -> undefined;
+ {_, [Name | _]} -> Name
+ end.
+
+accumulate(undefined, Fun, ValsDict, {NameAccs, OtherAcc}, _Acc0) ->
+ {NameAccs, orddict:merge(Fun, ValsDict, OtherAcc)};
+accumulate(Name, Fun, ValsDict, {NameAccs, OtherAcc}, Acc0) ->
+ F = fun (NameAcc) -> orddict:merge(Fun, ValsDict, NameAcc) end,
+ {case orddict:is_key(Name, NameAccs) of
+ true -> orddict:update(Name, F, NameAccs);
+ false -> orddict:store( Name, F(Acc0), NameAccs)
+ end, OtherAcc}.
+
+keyfetch(K, L) -> {value, {_, V}} = lists:keysearch(K, 1, L),
+ V.
+
+keyfind(K, L) -> case lists:keysearch(K, 1, L) of
+ {value, {_, V}} -> {value, V};
+ false -> false
+ end.
diff --git a/deps/rabbit/src/supervised_lifecycle.erl b/deps/rabbit/src/supervised_lifecycle.erl
new file mode 100644
index 0000000000..0e1bb9b5c8
--- /dev/null
+++ b/deps/rabbit/src/supervised_lifecycle.erl
@@ -0,0 +1,53 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Invoke callbacks on startup and termination.
+%%
+%% Simply hook this process into a supervision hierarchy, to have the
+%% callbacks invoked at a precise point during the establishment and
+%% teardown of that hierarchy, respectively.
+%%
+%% Or launch the process independently, and link to it, to have the
+%% callbacks invoked on startup and when the linked process
+%% terminates, respectively.
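+%%
+%% A minimal sketch of embedding it in a supervision tree (the child id
+%% and the my_app MFAs are hypothetical):
+%%
+%%   {my_lifecycle,
+%%    {supervised_lifecycle, start_link,
+%%     [my_lifecycle, {my_app, on_start, []}, {my_app, on_stop, []}]},
+%%    transient, 30000, worker, [supervised_lifecycle]}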
+
+-module(supervised_lifecycle).
+
+-behavior(gen_server).
+
+-export([start_link/3]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(atom(), rabbit_types:mfargs(), rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Name, StartMFA, StopMFA) ->
+ gen_server:start_link({local, Name}, ?MODULE, [StartMFA, StopMFA], []).
+
+%%----------------------------------------------------------------------------
+
+init([{M, F, A}, StopMFA]) ->
+ process_flag(trap_exit, true),
+ apply(M, F, A),
+ {ok, StopMFA}.
+
+handle_call(_Request, _From, State) -> {noreply, State}.
+
+handle_cast(_Msg, State) -> {noreply, State}.
+
+handle_info(_Info, State) -> {noreply, State}.
+
+terminate(_Reason, {M, F, A}) ->
+ apply(M, F, A),
+ ok.
+
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
diff --git a/deps/rabbit/src/tcp_listener.erl b/deps/rabbit/src/tcp_listener.erl
new file mode 100644
index 0000000000..93c24ab397
--- /dev/null
+++ b/deps/rabbit/src/tcp_listener.erl
@@ -0,0 +1,90 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(tcp_listener).
+
+%% Represents a running TCP listener (a process that listens for inbound
+%% TCP or TLS connections). Every protocol supported typically has one
+%% or two listeners, plain TCP and (optionally) TLS, but there can
+%% be more, e.g. when multiple network interfaces are involved.
+%%
+%% A listener has 6 properties (it is represented as a tuple of 6 elements):
+%%
+%% * IP address
+%% * Port
+%% * Node
+%% * Label (human-friendly name, e.g. AMQP 0-9-1)
+%% * Startup callback
+%% * Shutdown callback
+%%
+%% Listeners use Ranch in embedded mode to accept and "bridge" client
+%% connections with protocol entry points such as rabbit_reader.
+%%
+%% Listeners are tracked in a Mnesia table so that they can be
+%%
+%% * Shut down
+%% * Listed (e.g. in the management UI)
+%%
+%% Every tcp_listener process has callbacks that are executed on start
+%% and termination. Those must take care of listener registration
+%% among other things.
+%%
+%% Listeners are supervised by tcp_listener_sup (one supervisor per protocol).
+%%
+%% See also rabbit_networking and tcp_listener_sup.
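+%%
+%% Illustrative start (the my_proto callback MFAs are hypothetical; the
+%% IP address and port are appended to each callback's argument list,
+%% see init/1 and terminate/2 below):
+%%
+%%   tcp_listener:start_link({0,0,0,0}, 5672,
+%%                           {my_proto, listener_started, []},
+%%                           {my_proto, listener_stopped, []},
+%%                           "AMQP 0-9-1 listener").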
+
+-behaviour(gen_server).
+
+-export([start_link/5]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {on_startup, on_shutdown, label, ip, port}).
+
+%%----------------------------------------------------------------------------
+
+-type mfargs() :: {atom(), atom(), [any()]}.
+
+-spec start_link
+ (inet:ip_address(), inet:port_number(),
+ mfargs(), mfargs(), string()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(IPAddress, Port,
+ OnStartup, OnShutdown, Label) ->
+ gen_server:start_link(
+ ?MODULE, {IPAddress, Port,
+ OnStartup, OnShutdown, Label}, []).
+
+%%--------------------------------------------------------------------
+
+init({IPAddress, Port, {M,F,A} = OnStartup, OnShutdown, Label}) ->
+ process_flag(trap_exit, true),
+ error_logger:info_msg(
+ "started ~s on ~s:~p~n",
+ [Label, rabbit_misc:ntoab(IPAddress), Port]),
+ apply(M, F, A ++ [IPAddress, Port]),
+ {ok, #state{on_startup = OnStartup, on_shutdown = OnShutdown,
+ label = Label, ip=IPAddress, port=Port}}.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, #state{on_shutdown = {M,F,A}, label=Label, ip=IPAddress, port=Port}) ->
+ error_logger:info_msg("stopped ~s on ~s:~p~n",
+ [Label, rabbit_misc:ntoab(IPAddress), Port]),
+ apply(M, F, A ++ [IPAddress, Port]).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/tcp_listener_sup.erl b/deps/rabbit/src/tcp_listener_sup.erl
new file mode 100644
index 0000000000..82128bb2af
--- /dev/null
+++ b/deps/rabbit/src/tcp_listener_sup.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(tcp_listener_sup).
+
+%% Supervises TCP listeners. There is a separate supervisor for every
+%% protocol. In case of AMQP 0-9-1, it resides under rabbit_sup. Plugins
+%% that provide protocol support (e.g. STOMP) have an instance of this supervisor in their
+%% app supervision tree.
+%%
+%% See also rabbit_networking and tcp_listener.
+
+-behaviour(supervisor).
+
+-export([start_link/10]).
+-export([init/1]).
+
+-type mfargs() :: {atom(), atom(), [any()]}.
+
+-spec start_link
+ (inet:ip_address(), inet:port_number(), module(), [gen_tcp:listen_option()],
+ module(), any(), mfargs(), mfargs(), integer(), string()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ ConcurrentAcceptorCount, Label) ->
+ supervisor:start_link(
+ ?MODULE, {IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ ConcurrentAcceptorCount, Label}).
+
+init({IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ ConcurrentAcceptorCount, Label}) ->
+ {ok, AckTimeout} = application:get_env(rabbit, ssl_handshake_timeout),
+ MaxConnections = rabbit_misc:get_env(rabbit, connection_max, infinity),
+ RanchListenerOpts = #{
+ num_acceptors => ConcurrentAcceptorCount,
+ max_connections => MaxConnections,
+ handshake_timeout => AckTimeout,
+ connection_type => supervisor,
+ socket_opts => [{ip, IPAddress},
+ {port, Port} |
+ SocketOpts]
+ },
+ Flags = {one_for_all, 10, 10},
+ OurChildSpecStart = {tcp_listener, start_link, [IPAddress, Port, OnStartup, OnShutdown, Label]},
+ OurChildSpec = {tcp_listener, OurChildSpecStart, transient, 16#ffffffff, worker, [tcp_listener]},
+ RanchChildSpec = ranch:child_spec(rabbit_networking:ranch_ref(IPAddress, Port),
+ Transport, RanchListenerOpts,
+ ProtoSup, ProtoOpts),
+ {ok, {Flags, [RanchChildSpec, OurChildSpec]}}.
diff --git a/deps/rabbit/src/term_to_binary_compat.erl b/deps/rabbit/src/term_to_binary_compat.erl
new file mode 100644
index 0000000000..327a846d1f
--- /dev/null
+++ b/deps/rabbit/src/term_to_binary_compat.erl
@@ -0,0 +1,15 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(term_to_binary_compat).
+
+-include("rabbit.hrl").
+
+-export([term_to_binary_1/1]).
+
+term_to_binary_1(Term) ->
+ term_to_binary(Term, [{minor_version, 1}]).
diff --git a/deps/rabbit/src/vhost.erl b/deps/rabbit/src/vhost.erl
new file mode 100644
index 0000000000..ca704183a0
--- /dev/null
+++ b/deps/rabbit/src/vhost.erl
@@ -0,0 +1,172 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(vhost).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("vhost.hrl").
+
+-export([
+ new/2,
+ new/3,
+ fields/0,
+ fields/1,
+ info_keys/0,
+ record_version_to_use/0,
+ upgrade/1,
+ upgrade_to/2,
+ pattern_match_all/0,
+ get_name/1,
+ get_limits/1,
+ get_metadata/1,
+ get_description/1,
+ get_tags/1,
+ set_limits/2
+]).
+
+-define(record_version, vhost_v2).
+
+-type(name() :: binary()).
+
+-type(metadata_key() :: atom()).
+
+-type(metadata() :: #{description => binary(),
+ tags => [atom()],
+ metadata_key() => any()} | undefined).
+
+-type vhost() :: vhost_v1:vhost_v1() | vhost_v2().
+
+-record(vhost, {
+ %% name as a binary
+ virtual_host :: name() | '_',
+ %% proplist of limits configured, if any
+ limits :: list() | '_',
+ metadata :: metadata() | '_'
+}).
+
+-type vhost_v2() :: #vhost{
+ virtual_host :: name(),
+ limits :: list(),
+ metadata :: metadata()
+ }.
+
+-type vhost_pattern() :: vhost_v1:vhost_v1_pattern() |
+ vhost_v2_pattern().
+-type vhost_v2_pattern() :: #vhost{
+ virtual_host :: name() | '_',
+ limits :: '_',
+ metadata :: '_'
+ }.
+
+-export_type([name/0,
+ metadata_key/0,
+ metadata/0,
+ vhost/0,
+ vhost_v2/0,
+ vhost_pattern/0,
+ vhost_v2_pattern/0]).
+
+-spec new(name(), list()) -> vhost().
+new(Name, Limits) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #vhost{virtual_host = Name, limits = Limits};
+ _ ->
+ vhost_v1:new(Name, Limits)
+ end.
+
+-spec new(name(), list(), map()) -> vhost().
+new(Name, Limits, Metadata) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #vhost{virtual_host = Name, limits = Limits, metadata = Metadata};
+ _ ->
+ vhost_v1:new(Name, Limits)
+ end.
+
+-spec record_version_to_use() -> vhost_v1 | vhost_v2.
+
+record_version_to_use() ->
+ case rabbit_feature_flags:is_enabled(virtual_host_metadata) of
+ true -> ?record_version;
+ false -> vhost_v1:record_version_to_use()
+ end.
+
+-spec upgrade(vhost()) -> vhost().
+
+upgrade(#vhost{} = VHost) -> VHost;
+upgrade(OldVHost) -> upgrade_to(record_version_to_use(), OldVHost).
+
+-spec upgrade_to
+(vhost_v2, vhost()) -> vhost_v2();
+(vhost_v1, vhost_v1:vhost_v1()) -> vhost_v1:vhost_v1().
+
+upgrade_to(?record_version, #vhost{} = VHost) ->
+ VHost;
+upgrade_to(?record_version, OldVHost) ->
+ Fields = erlang:tuple_to_list(OldVHost) ++ [#{description => <<"">>, tags => []}],
+ #vhost{} = erlang:list_to_tuple(Fields);
+upgrade_to(Version, OldVHost) ->
+ vhost_v1:upgrade_to(Version, OldVHost).
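+
+%% Illustrative only: upgrading a v1 record appends an empty metadata
+%% map to the tuple, e.g.
+%%   upgrade_to(vhost_v2, {vhost, <<"/">>, []})
+%%   %% => {vhost, <<"/">>, [], #{description => <<"">>, tags => []}}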
+
+
+fields() ->
+ case record_version_to_use() of
+ ?record_version -> fields(?record_version);
+ _ -> vhost_v1:fields()
+ end.
+
+fields(?record_version) -> record_info(fields, vhost);
+fields(Version) -> vhost_v1:fields(Version).
+
+info_keys() ->
+ case record_version_to_use() of
+ %% note: this reports description and tags separately even though
+ %% they are stored in the metadata map. MK.
+ ?record_version -> [name, description, tags, metadata, tracing, cluster_state];
+ _ -> vhost_v1:info_keys()
+ end.
+
+-spec pattern_match_all() -> vhost_pattern().
+
+pattern_match_all() ->
+ case record_version_to_use() of
+ ?record_version -> #vhost{_ = '_'};
+ _ -> vhost_v1:pattern_match_all()
+ end.
+
+-spec get_name(vhost()) -> name().
+get_name(#vhost{virtual_host = Value}) -> Value;
+get_name(VHost) -> vhost_v1:get_name(VHost).
+
+-spec get_limits(vhost()) -> list().
+get_limits(#vhost{limits = Value}) -> Value;
+get_limits(VHost) -> vhost_v1:get_limits(VHost).
+
+-spec get_metadata(vhost()) -> metadata().
+get_metadata(#vhost{metadata = Value}) -> Value;
+get_metadata(VHost) -> vhost_v1:get_metadata(VHost).
+
+-spec get_description(vhost()) -> binary().
+get_description(#vhost{} = VHost) ->
+ maps:get(description, get_metadata(VHost), undefined);
+get_description(VHost) ->
+ vhost_v1:get_description(VHost).
+
+-spec get_tags(vhost()) -> [atom()].
+get_tags(#vhost{} = VHost) ->
+ maps:get(tags, get_metadata(VHost), undefined);
+get_tags(VHost) ->
+ vhost_v1:get_tags(VHost).
+
+set_limits(VHost, Value) ->
+ case record_version_to_use() of
+ ?record_version ->
+ VHost#vhost{limits = Value};
+ _ ->
+ vhost_v1:set_limits(VHost, Value)
+ end.
diff --git a/deps/rabbit/src/vhost_v1.erl b/deps/rabbit/src/vhost_v1.erl
new file mode 100644
index 0000000000..5b53eb148a
--- /dev/null
+++ b/deps/rabbit/src/vhost_v1.erl
@@ -0,0 +1,106 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(vhost_v1).
+
+-include("vhost.hrl").
+
+-export([new/2,
+ new/3,
+ upgrade/1,
+ upgrade_to/2,
+ fields/0,
+ fields/1,
+ info_keys/0,
+ field_name/0,
+ record_version_to_use/0,
+ pattern_match_all/0,
+ get_name/1,
+ get_limits/1,
+ get_metadata/1,
+ get_description/1,
+ get_tags/1,
+ set_limits/2
+]).
+
+-define(record_version, ?MODULE).
+
+%% Represents a vhost.
+%%
+%% Historically this record had 2 arguments although the 2nd
+%% was never used (`dummy`, always undefined). This is because
+%% single field records were/are illegal in OTP.
+%%
+%% As of 3.6.x, the second argument is vhost limits,
+%% which is actually used and has the same default.
+%% Nonetheless, this required a migration, see rabbit_upgrade_functions.
+
+-record(vhost, {
+ %% name as a binary
+ virtual_host :: vhost:name() | '_',
+ %% proplist of limits configured, if any
+ limits :: list() | '_'}).
+
+-type vhost() :: vhost_v1().
+-type vhost_v1() :: #vhost{
+ virtual_host :: vhost:name(),
+ limits :: list()
+ }.
+
+-export_type([vhost/0,
+ vhost_v1/0,
+ vhost_pattern/0,
+ vhost_v1_pattern/0]).
+
+
+-spec new(vhost:name(), list()) -> vhost().
+new(Name, Limits) ->
+ #vhost{virtual_host = Name, limits = Limits}.
+
+-spec new(vhost:name(), list(), map()) -> vhost().
+new(Name, Limits, _Metadata) ->
+ #vhost{virtual_host = Name, limits = Limits}.
+
+
+-spec record_version_to_use() -> vhost_v1.
+record_version_to_use() ->
+ ?record_version.
+
+-spec upgrade(vhost()) -> vhost().
+upgrade(#vhost{} = VHost) -> VHost.
+
+-spec upgrade_to(vhost_v1, vhost()) -> vhost().
+upgrade_to(?record_version, #vhost{} = VHost) ->
+ VHost.
+
+fields() -> fields(?record_version).
+
+fields(?record_version) -> record_info(fields, vhost).
+
+field_name() -> #vhost.virtual_host.
+
+info_keys() -> [name, tracing, cluster_state].
+
+-type vhost_pattern() :: vhost_v1_pattern().
+-type vhost_v1_pattern() :: #vhost{
+ virtual_host :: vhost:name() | '_',
+ limits :: '_'
+ }.
+
+-spec pattern_match_all() -> vhost_pattern().
+
+pattern_match_all() -> #vhost{_ = '_'}.
+
+get_name(#vhost{virtual_host = Value}) -> Value.
+get_limits(#vhost{limits = Value}) -> Value.
+
+get_metadata(_VHost) -> undefined.
+get_description(_VHost) -> undefined.
+get_tags(_VHost) -> undefined.
+
+set_limits(VHost, Value) ->
+ VHost#vhost{limits = Value}.
diff --git a/deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl b/deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl
new file mode 100644
index 0000000000..a02c4721bc
--- /dev/null
+++ b/deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl
@@ -0,0 +1,302 @@
+-module(amqqueue_backward_compatibility_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("amqqueue.hrl").
+
+-export([all/0,
+ groups/0,
+ init_per_suite/2,
+ end_per_suite/2,
+ init_per_group/2,
+ end_per_group/2,
+ init_per_testcase/2,
+ end_per_testcase/2,
+
+ new_amqqueue_v1_is_amqqueue/1,
+ new_amqqueue_v2_is_amqqueue/1,
+ random_term_is_not_amqqueue/1,
+
+ amqqueue_v1_is_durable/1,
+ amqqueue_v2_is_durable/1,
+ random_term_is_not_durable/1,
+
+ amqqueue_v1_state_matching/1,
+ amqqueue_v2_state_matching/1,
+ random_term_state_matching/1,
+
+ amqqueue_v1_type_matching/1,
+ amqqueue_v2_type_matching/1,
+ random_term_type_matching/1,
+
+ upgrade_v1_to_v2/1
+ ]).
+
+-define(long_tuple, {random_tuple, a, b, c, d, e, f, g, h, i, j, k, l, m,
+ n, o, p, q, r, s, t, u, v, w, x, y, z}).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [new_amqqueue_v1_is_amqqueue,
+ new_amqqueue_v2_is_amqqueue,
+ random_term_is_not_amqqueue,
+ amqqueue_v1_is_durable,
+ amqqueue_v2_is_durable,
+ random_term_is_not_durable,
+ amqqueue_v1_state_matching,
+ amqqueue_v2_state_matching,
+ random_term_state_matching,
+ amqqueue_v1_type_matching,
+ amqqueue_v2_type_matching,
+ random_term_type_matching]}
+ ].
+
+init_per_suite(_, Config) -> Config.
+end_per_suite(_, Config) -> Config.
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(_, Config) -> Config.
+end_per_testcase(_, Config) -> Config.
+
+new_amqqueue_v1_is_amqqueue(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ Queue = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ false,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ ?assert(?is_amqqueue(Queue)),
+ ?assert(?is_amqqueue_v1(Queue)),
+ ?assert(not ?is_amqqueue_v2(Queue)),
+ ?assert(?amqqueue_is_classic(Queue)),
+ ?assert(amqqueue:is_classic(Queue)),
+ ?assert(not ?amqqueue_is_quorum(Queue)),
+ ?assert(not ?amqqueue_vhost_equals(Queue, <<"frazzle">>)),
+ ?assert(?amqqueue_has_valid_pid(Queue)),
+ ?assert(?amqqueue_pid_equals(Queue, self())),
+ ?assert(?amqqueue_pids_are_equal(Queue, Queue)),
+ ?assert(?amqqueue_pid_runs_on_local_node(Queue)),
+ ?assert(amqqueue:qnode(Queue) == node()).
+
+new_amqqueue_v2_is_amqqueue(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v2),
+ Queue = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ false,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ rabbit_classic_queue),
+ ?assert(?is_amqqueue(Queue)),
+ ?assert(?is_amqqueue_v2(Queue)),
+ ?assert(not ?is_amqqueue_v1(Queue)),
+ ?assert(?amqqueue_is_classic(Queue)),
+ ?assert(amqqueue:is_classic(Queue)),
+ ?assert(not ?amqqueue_is_quorum(Queue)),
+ ?assert(not ?amqqueue_vhost_equals(Queue, <<"frazzle">>)),
+ ?assert(?amqqueue_has_valid_pid(Queue)),
+ ?assert(?amqqueue_pid_equals(Queue, self())),
+ ?assert(?amqqueue_pids_are_equal(Queue, Queue)),
+ ?assert(?amqqueue_pid_runs_on_local_node(Queue)),
+ ?assert(amqqueue:qnode(Queue) == node()).
+
+random_term_is_not_amqqueue(_) ->
+ Term = ?long_tuple,
+ ?assert(not ?is_amqqueue(Term)),
+ ?assert(not ?is_amqqueue_v2(Term)),
+ ?assert(not ?is_amqqueue_v1(Term)).
+
+%% -------------------------------------------------------------------
+
+amqqueue_v1_is_durable(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ TransientQueue = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ false,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ DurableQueue = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ ?assert(not ?amqqueue_is_durable(TransientQueue)),
+ ?assert(?amqqueue_is_durable(DurableQueue)).
+
+amqqueue_v2_is_durable(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ TransientQueue = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ false,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ classic),
+ DurableQueue = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ classic),
+ ?assert(not ?amqqueue_is_durable(TransientQueue)),
+ ?assert(?amqqueue_is_durable(DurableQueue)).
+
+random_term_is_not_durable(_) ->
+ Term = ?long_tuple,
+ ?assert(not ?amqqueue_is_durable(Term)).
+
+%% -------------------------------------------------------------------
+
+amqqueue_v1_state_matching(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ Queue1 = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ ?assert(?amqqueue_state_is(Queue1, live)),
+ Queue2 = amqqueue:set_state(Queue1, stopped),
+ ?assert(?amqqueue_state_is(Queue2, stopped)).
+
+amqqueue_v2_state_matching(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ Queue1 = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ classic),
+ ?assert(?amqqueue_state_is(Queue1, live)),
+ Queue2 = amqqueue:set_state(Queue1, stopped),
+ ?assert(?amqqueue_state_is(Queue2, stopped)).
+
+random_term_state_matching(_) ->
+ Term = ?long_tuple,
+ ?assert(not ?amqqueue_state_is(Term, live)).
+
+%% -------------------------------------------------------------------
+
+amqqueue_v1_type_matching(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ Queue = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ ?assert(?amqqueue_is_classic(Queue)),
+ ?assert(amqqueue:is_classic(Queue)),
+ ?assert(not ?amqqueue_is_quorum(Queue)).
+
+amqqueue_v2_type_matching(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ ClassicQueue = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ rabbit_classic_queue),
+ ?assert(?amqqueue_is_classic(ClassicQueue)),
+ ?assert(amqqueue:is_classic(ClassicQueue)),
+ ?assert(not ?amqqueue_is_quorum(ClassicQueue)),
+ ?assert(not amqqueue:is_quorum(ClassicQueue)),
+ QuorumQueue = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ rabbit_quorum_queue),
+ ?assert(not ?amqqueue_is_classic(QuorumQueue)),
+ ?assert(not amqqueue:is_classic(QuorumQueue)),
+ ?assert(?amqqueue_is_quorum(QuorumQueue)),
+ ?assert(amqqueue:is_quorum(QuorumQueue)).
+
+random_term_type_matching(_) ->
+ Term = ?long_tuple,
+ ?assert(not ?amqqueue_is_classic(Term)),
+ ?assert(not ?amqqueue_is_quorum(Term)),
+ ?assertException(error, function_clause, amqqueue:is_classic(Term)),
+ ?assertException(error, function_clause, amqqueue:is_quorum(Term)).
+
+%% -------------------------------------------------------------------
+
+upgrade_v1_to_v2(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ OldQueue = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ ?assert(?is_amqqueue_v1(OldQueue)),
+ ?assert(not ?is_amqqueue_v2(OldQueue)),
+ NewQueue = amqqueue:upgrade_to(amqqueue_v2, OldQueue),
+ ?assert(not ?is_amqqueue_v1(NewQueue)),
+ ?assert(?is_amqqueue_v2(NewQueue)).
diff --git a/deps/rabbit/test/backing_queue_SUITE.erl b/deps/rabbit/test/backing_queue_SUITE.erl
new file mode 100644
index 0000000000..be6004c8b9
--- /dev/null
+++ b/deps/rabbit/test/backing_queue_SUITE.erl
@@ -0,0 +1,1632 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(backing_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("amqqueue.hrl").
+
+-compile(export_all).
+
+-define(PERSISTENT_MSG_STORE, msg_store_persistent).
+-define(TRANSIENT_MSG_STORE, msg_store_transient).
+
+-define(TIMEOUT, 30000).
+-define(VHOST, <<"/">>).
+
+-define(VARIABLE_QUEUE_TESTCASES, [
+ variable_queue_dynamic_duration_change,
+ variable_queue_partial_segments_delta_thing,
+ variable_queue_all_the_bits_not_covered_elsewhere_A,
+ variable_queue_all_the_bits_not_covered_elsewhere_B,
+ variable_queue_drop,
+ variable_queue_fold_msg_on_disk,
+ variable_queue_dropfetchwhile,
+ variable_queue_dropwhile_varying_ram_duration,
+ variable_queue_fetchwhile_varying_ram_duration,
+ variable_queue_ack_limiting,
+ variable_queue_purge,
+ variable_queue_requeue,
+ variable_queue_requeue_ram_beta,
+ variable_queue_fold,
+ variable_queue_batch_publish,
+ variable_queue_batch_publish_delivered
+ ]).
+
+-define(BACKING_QUEUE_TESTCASES, [
+ bq_queue_index,
+ bq_queue_index_props,
+ {variable_queue_default, [parallel], ?VARIABLE_QUEUE_TESTCASES},
+ {variable_queue_lazy, [parallel], ?VARIABLE_QUEUE_TESTCASES ++
+ [variable_queue_mode_change]},
+ bq_variable_queue_delete_msg_store_files_callback,
+ bq_queue_recover
+ ]).
+
+all() ->
+ [
+ {group, backing_queue_tests}
+ ].
+
+groups() ->
+ [
+ {backing_queue_tests, [], [
+ msg_store,
+ {backing_queue_embed_limit_0, [], ?BACKING_QUEUE_TESTCASES},
+ {backing_queue_embed_limit_1024, [], ?BACKING_QUEUE_TESTCASES}
+ ]}
+ ].
+
+group(backing_queue_tests) ->
+ [
+ %% Several tests based on lazy queues may take more than 30 minutes.
+ {timetrap, {hours, 1}}
+ ];
+group(_) ->
+ [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 2,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun(C) -> init_per_group1(Group, C) end,
+ fun setup_file_handle_cache/1
+ ]);
+ false ->
+ rabbit_ct_helpers:run_steps(Config, [
+ fun(C) -> init_per_group1(Group, C) end
+ ])
+ end.
+
+init_per_group1(backing_queue_tests, Config) ->
+ Module = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, get_env, [rabbit, backing_queue_module]),
+ case Module of
+ {ok, rabbit_priority_queue} ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, setup_backing_queue_test_group, [Config]);
+ _ ->
+ {skip, rabbit_misc:format(
+ "Backing queue module not supported by this test group: ~p~n",
+ [Module])}
+ end;
+init_per_group1(backing_queue_embed_limit_0, Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, queue_index_embed_msgs_below, 0]),
+ Config;
+init_per_group1(backing_queue_embed_limit_1024, Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, queue_index_embed_msgs_below, 1024]),
+ Config;
+init_per_group1(variable_queue_default, Config) ->
+ rabbit_ct_helpers:set_config(Config, {variable_queue_type, default});
+init_per_group1(variable_queue_lazy, Config) ->
+ rabbit_ct_helpers:set_config(Config, {variable_queue_type, lazy});
+init_per_group1(from_cluster_node1, Config) ->
+ rabbit_ct_helpers:set_config(Config, {test_direction, {0, 1}});
+init_per_group1(from_cluster_node2, Config) ->
+ rabbit_ct_helpers:set_config(Config, {test_direction, {1, 0}});
+init_per_group1(_, Config) ->
+ Config.
+
+setup_file_handle_cache(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, setup_file_handle_cache1, []),
+ Config.
+
+setup_file_handle_cache1() ->
+ %% FIXME: Why are we doing this?
+ application:set_env(rabbit, file_handles_high_watermark, 10),
+ ok = file_handle_cache:set_limit(10),
+ ok.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ [fun(C) -> end_per_group1(Group, C) end] ++
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+end_per_group1(backing_queue_tests, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, teardown_backing_queue_test_group, [Config]);
+end_per_group1(Group, Config)
+when Group =:= backing_queue_embed_limit_0
+orelse Group =:= backing_queue_embed_limit_1024 ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, queue_index_embed_msgs_below,
+ ?config(rmq_queue_index_embed_msgs_below, Config)]),
+ Config;
+end_per_group1(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) when Testcase == variable_queue_requeue;
+ Testcase == variable_queue_fold ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config, 0, application, set_env,
+ [rabbit, queue_explicit_gc_run_operation_threshold, 0]),
+ rabbit_ct_helpers:testcase_started(Config, Testcase);
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) when Testcase == variable_queue_requeue;
+ Testcase == variable_queue_fold ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config, 0, application, set_env,
+ [rabbit, queue_explicit_gc_run_operation_threshold, 1000]),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Message store.
+%% -------------------------------------------------------------------
+
+msg_store(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, msg_store1, [Config]).
+
+msg_store1(_Config) ->
+ restart_msg_store_empty(),
+ MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)],
+ {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(length(MsgIds) div 2, MsgIds),
+ Ref = rabbit_guid:gen(),
+ {Cap, MSCState} = msg_store_client_init_capture(
+ ?PERSISTENT_MSG_STORE, Ref),
+ Ref2 = rabbit_guid:gen(),
+ {Cap2, MSC2State} = msg_store_client_init_capture(
+ ?PERSISTENT_MSG_STORE, Ref2),
+ %% check we don't contain any of the msgs we're about to publish
+ false = msg_store_contains(false, MsgIds, MSCState),
+ %% test confirm logic
+ passed = test_msg_store_confirms([hd(MsgIds)], Cap, MSCState),
+ %% check we don't contain any of the msgs we're about to publish
+ false = msg_store_contains(false, MsgIds, MSCState),
+ %% publish the first half
+ ok = msg_store_write(MsgIds1stHalf, MSCState),
+ %% sync on the first half
+ ok = on_disk_await(Cap, MsgIds1stHalf),
+ %% publish the second half
+ ok = msg_store_write(MsgIds2ndHalf, MSCState),
+ %% check they're all in there
+ true = msg_store_contains(true, MsgIds, MSCState),
+ %% publish the latter half twice so we hit the caching and ref
+ %% count code. We need to do this through a 2nd client since a
+ %% single client is not supposed to write the same message more
+ %% than once without first removing it.
+ ok = msg_store_write(MsgIds2ndHalf, MSC2State),
+ %% check they're still all in there
+ true = msg_store_contains(true, MsgIds, MSCState),
+ %% sync on the 2nd half
+ ok = on_disk_await(Cap2, MsgIds2ndHalf),
+ %% cleanup
+ ok = on_disk_stop(Cap2),
+ ok = rabbit_msg_store:client_delete_and_terminate(MSC2State),
+ ok = on_disk_stop(Cap),
+ %% read them all
+ MSCState1 = msg_store_read(MsgIds, MSCState),
+ %% read them all again - this will hit the cache, not disk
+ MSCState2 = msg_store_read(MsgIds, MSCState1),
+ %% remove them all
+ ok = msg_store_remove(MsgIds, MSCState2),
+ %% check first half doesn't exist
+ false = msg_store_contains(false, MsgIds1stHalf, MSCState2),
+ %% check second half does exist
+ true = msg_store_contains(true, MsgIds2ndHalf, MSCState2),
+ %% read the second half again
+ MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2),
+ %% read the second half again, just for fun (aka code coverage)
+ MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3),
+ ok = rabbit_msg_store:client_terminate(MSCState4),
+ %% stop and restart, preserving every other msg in 2nd half
+ ok = rabbit_variable_queue:stop_msg_store(?VHOST),
+ ok = rabbit_variable_queue:start_msg_store(?VHOST,
+ [], {fun ([]) -> finished;
+ ([MsgId|MsgIdsTail])
+ when length(MsgIdsTail) rem 2 == 0 ->
+ {MsgId, 1, MsgIdsTail};
+ ([MsgId|MsgIdsTail]) ->
+ {MsgId, 0, MsgIdsTail}
+ end, MsgIds2ndHalf}),
+ MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ %% check we have the right msgs left
+ lists:foldl(
+ fun (MsgId, Bool) ->
+ not(Bool = rabbit_msg_store:contains(MsgId, MSCState5))
+ end, false, MsgIds2ndHalf),
+ ok = rabbit_msg_store:client_terminate(MSCState5),
+ %% restart empty
+ restart_msg_store_empty(),
+ MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ %% check we don't contain any of the msgs
+ false = msg_store_contains(false, MsgIds, MSCState6),
+ %% publish the first half again
+ ok = msg_store_write(MsgIds1stHalf, MSCState6),
+ %% this should force some sort of sync internally, otherwise we would misread
+ ok = rabbit_msg_store:client_terminate(
+ msg_store_read(MsgIds1stHalf, MSCState6)),
+ MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ ok = msg_store_remove(MsgIds1stHalf, MSCState7),
+ ok = rabbit_msg_store:client_terminate(MSCState7),
+ %% restart empty
+ restart_msg_store_empty(), %% now safe to reuse msg_ids
+ %% push a lot of msgs in... at least 100 files worth
+ {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit),
+ PayloadSizeBits = 65536,
+ BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)),
+ MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)],
+ Payload = << 0:PayloadSizeBits >>,
+ ok = with_msg_store_client(
+ ?PERSISTENT_MSG_STORE, Ref,
+ fun (MSCStateM) ->
+ [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) ||
+ MsgId <- MsgIdsBig],
+ MSCStateM
+ end),
+ %% now read them to ensure we hit the fast client-side reading
+ ok = foreach_with_msg_store_client(
+ ?PERSISTENT_MSG_STORE, Ref,
+ fun (MsgId, MSCStateM) ->
+ {{ok, Payload}, MSCStateN} = rabbit_msg_store:read(
+ MsgId, MSCStateM),
+ MSCStateN
+ end, MsgIdsBig),
+ %% .., then 3s by 1...
+ ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+ [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]),
+ %% .., then remove 3s by 2, from the young end first. This hits
+ %% GC (under 50% good data left, but no empty files. Must GC).
+ ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+ [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]),
+ %% .., then remove 3s by 3, from the young end first. This hits
+ %% GC...
+ ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+ [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]),
+ %% ensure empty
+ ok = with_msg_store_client(
+ ?PERSISTENT_MSG_STORE, Ref,
+ fun (MSCStateM) ->
+ false = msg_store_contains(false, MsgIdsBig, MSCStateM),
+ MSCStateM
+ end),
+ %%
+ passed = test_msg_store_client_delete_and_terminate(),
+ %% restart empty
+ restart_msg_store_empty(),
+ passed.
+
+restart_msg_store_empty() ->
+ ok = rabbit_variable_queue:stop_msg_store(?VHOST),
+ ok = rabbit_variable_queue:start_msg_store(?VHOST,
+ undefined, {fun (ok) -> finished end, ok}).
+
+msg_id_bin(X) ->
+ erlang:md5(term_to_binary(X)).
+
+on_disk_capture() ->
+ receive
+ {await, MsgIds, Pid} -> on_disk_capture([], MsgIds, Pid);
+ stop -> done
+ end.
+
+on_disk_capture([_|_], _Awaiting, Pid) ->
+ Pid ! {self(), surplus};
+on_disk_capture(OnDisk, Awaiting, Pid) ->
+ receive
+ {on_disk, MsgIdsS} ->
+ MsgIds = gb_sets:to_list(MsgIdsS),
+ on_disk_capture(OnDisk ++ (MsgIds -- Awaiting), Awaiting -- MsgIds,
+ Pid);
+ stop ->
+ done
+ after (case Awaiting of [] -> 200; _ -> ?TIMEOUT end) ->
+ case Awaiting of
+ [] -> Pid ! {self(), arrived}, on_disk_capture();
+ _ -> Pid ! {self(), timeout}
+ end
+ end.
+
+on_disk_await(Pid, MsgIds) when is_list(MsgIds) ->
+ Pid ! {await, MsgIds, self()},
+ receive
+ {Pid, arrived} -> ok;
+ {Pid, Error} -> Error
+ end.
+
+on_disk_stop(Pid) ->
+ MRef = erlang:monitor(process, Pid),
+ Pid ! stop,
+ receive {'DOWN', MRef, process, Pid, _Reason} ->
+ ok
+ end.
+
+msg_store_client_init_capture(MsgStore, Ref) ->
+ Pid = spawn(fun on_disk_capture/0),
+ {Pid, rabbit_vhost_msg_store:client_init(?VHOST, MsgStore, Ref,
+ fun (MsgIds, _ActionTaken) ->
+ Pid ! {on_disk, MsgIds}
+ end, undefined)}.
+
+msg_store_contains(Atom, MsgIds, MSCState) ->
+ Atom = lists:foldl(
+ fun (MsgId, Atom1) when Atom1 =:= Atom ->
+ rabbit_msg_store:contains(MsgId, MSCState) end,
+ Atom, MsgIds).
+
+msg_store_read(MsgIds, MSCState) ->
+ lists:foldl(fun (MsgId, MSCStateM) ->
+ {{ok, MsgId}, MSCStateN} = rabbit_msg_store:read(
+ MsgId, MSCStateM),
+ MSCStateN
+ end, MSCState, MsgIds).
+
+msg_store_write(MsgIds, MSCState) ->
+ ok = lists:foldl(fun (MsgId, ok) ->
+ rabbit_msg_store:write(MsgId, MsgId, MSCState)
+ end, ok, MsgIds).
+
+msg_store_write_flow(MsgIds, MSCState) ->
+ ok = lists:foldl(fun (MsgId, ok) ->
+ rabbit_msg_store:write_flow(MsgId, MsgId, MSCState)
+ end, ok, MsgIds).
+
+msg_store_remove(MsgIds, MSCState) ->
+ rabbit_msg_store:remove(MsgIds, MSCState).
+
+msg_store_remove(MsgStore, Ref, MsgIds) ->
+ with_msg_store_client(MsgStore, Ref,
+ fun (MSCStateM) ->
+ ok = msg_store_remove(MsgIds, MSCStateM),
+ MSCStateM
+ end).
+
+with_msg_store_client(MsgStore, Ref, Fun) ->
+ rabbit_msg_store:client_terminate(
+ Fun(msg_store_client_init(MsgStore, Ref))).
+
+foreach_with_msg_store_client(MsgStore, Ref, Fun, L) ->
+ rabbit_msg_store:client_terminate(
+ lists:foldl(fun (MsgId, MSCState) -> Fun(MsgId, MSCState) end,
+ msg_store_client_init(MsgStore, Ref), L)).
+
+test_msg_store_confirms(MsgIds, Cap, MSCState) ->
+ %% write -> confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% remove -> _
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, []),
+ %% write, remove -> confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% write, remove, write -> confirmed, confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds ++ MsgIds),
+ %% remove, write -> confirmed
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% remove, write, remove -> confirmed
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% confirmation on timer-based sync
+ passed = test_msg_store_confirm_timer(),
+ passed.
+
+test_msg_store_confirm_timer() ->
+ Ref = rabbit_guid:gen(),
+ MsgId = msg_id_bin(1),
+ Self = self(),
+ MSCState = rabbit_vhost_msg_store:client_init(
+ ?VHOST,
+ ?PERSISTENT_MSG_STORE,
+ Ref,
+ fun (MsgIds, _ActionTaken) ->
+ case gb_sets:is_member(MsgId, MsgIds) of
+ true -> Self ! on_disk;
+ false -> ok
+ end
+ end, undefined),
+ ok = msg_store_write([MsgId], MSCState),
+ ok = msg_store_keep_busy_until_confirm([msg_id_bin(2)], MSCState, false),
+ ok = msg_store_remove([MsgId], MSCState),
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ passed.
+
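+%% Keep the store busy (write_flow + remove in a loop, honouring
+%% credit_flow back-pressure) until the timer-based sync delivers the
+%% on_disk confirm for the message written in test_msg_store_confirm_timer/0.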
+msg_store_keep_busy_until_confirm(MsgIds, MSCState, Blocked) ->
+ After = case Blocked of
+ false -> 0;
+ true -> ?MAX_WAIT
+ end,
+ Recurse = fun () -> msg_store_keep_busy_until_confirm(
+ MsgIds, MSCState, credit_flow:blocked()) end,
+ receive
+ on_disk -> ok;
+ {bump_credit, Msg} -> credit_flow:handle_bump_msg(Msg),
+ Recurse()
+ after After ->
+ ok = msg_store_write_flow(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ Recurse()
+ end.
+
+test_msg_store_client_delete_and_terminate() ->
+ restart_msg_store_empty(),
+ MsgIds = [msg_id_bin(M) || M <- lists:seq(1, 10)],
+ Ref = rabbit_guid:gen(),
+ MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ ok = msg_store_write(MsgIds, MSCState),
+ %% test the 'dying client' fast path for writes
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ passed.
+
+%% -------------------------------------------------------------------
+%% Backing queue.
+%% -------------------------------------------------------------------
+
+setup_backing_queue_test_group(Config) ->
+ {ok, FileSizeLimit} =
+ application:get_env(rabbit, msg_store_file_size_limit),
+ application:set_env(rabbit, msg_store_file_size_limit, 512),
+ {ok, MaxJournal} =
+ application:get_env(rabbit, queue_index_max_journal_entries),
+ application:set_env(rabbit, queue_index_max_journal_entries, 128),
+ application:set_env(rabbit, msg_store_file_size_limit,
+ FileSizeLimit),
+ {ok, Bytes} =
+ application:get_env(rabbit, queue_index_embed_msgs_below),
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_queue_index_max_journal_entries, MaxJournal},
+ {rmq_queue_index_embed_msgs_below, Bytes}
+ ]).
+
+teardown_backing_queue_test_group(Config) ->
+ %% FIXME: Undo everything the setup function did.
+ application:set_env(rabbit, queue_index_max_journal_entries,
+ ?config(rmq_queue_index_max_journal_entries, Config)),
+ %% We will have restarted the message store, and thus changed
+ %% the order of the children of rabbit_sup. This will cause
+ %% problems if there are subsequent failures - see bug 24262.
+ ok = restart_app(),
+ Config.
+
+bq_queue_index(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_queue_index1, [Config]).
+
+bq_queue_index1(_Config) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+ TwoSegs = SegmentSize + SegmentSize,
+ MostOfASegment = trunc(SegmentSize*0.75),
+ SeqIdsA = lists:seq(0, MostOfASegment-1),
+ SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment),
+ SeqIdsC = lists:seq(0, trunc(SegmentSize/2)),
+ SeqIdsD = lists:seq(0, SegmentSize*4),
+
+ with_empty_test_queue(
+ fun (Qi0, QName) ->
+ {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0),
+ {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1),
+ {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2),
+ {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3),
+ ok = verify_read_with_published(false, false, ReadA,
+ lists:reverse(SeqIdsMsgIdsA)),
+ %% should get length back as 0, as all the msgs were transient
+ {0, 0, Qi6} = restart_test_queue(Qi4, QName),
+ {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6),
+ {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7),
+ {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8),
+ {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9),
+ ok = verify_read_with_published(false, true, ReadB,
+ lists:reverse(SeqIdsMsgIdsB)),
+ %% should get length back as MostOfASegment
+ LenB = length(SeqIdsB),
+ BytesB = LenB * 10,
+ {LenB, BytesB, Qi12} = restart_test_queue(Qi10, QName),
+ {0, TwoSegs, Qi13} = rabbit_queue_index:bounds(Qi12),
+ Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13),
+ {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14),
+ ok = verify_read_with_published(true, true, ReadC,
+ lists:reverse(SeqIdsMsgIdsB)),
+ Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15),
+ Qi17 = rabbit_queue_index:flush(Qi16),
+ %% Everything will have gone now because #pubs == #acks
+ {0, 0, Qi18} = rabbit_queue_index:bounds(Qi17),
+ %% should get length back as 0 because all persistent
+ %% msgs have been acked
+ {0, 0, Qi19} = restart_test_queue(Qi18, QName),
+ Qi19
+ end),
+
+ %% These next bits are just to hit the auto deletion of segment files.
+ %% First, partials:
+ %% a) partial pub+del+ack, then move to new segment
+ with_empty_test_queue(
+ fun (Qi0, _QName) ->
+ {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
+ Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2),
+ Qi4 = rabbit_queue_index:flush(Qi3),
+ {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize],
+ false, Qi4),
+ Qi5
+ end),
+
+ %% b) partial pub+del, then move to new segment, then ack all in old segment
+ with_empty_test_queue(
+ fun (Qi0, _QName) ->
+ {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
+ {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize],
+ false, Qi2),
+ Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3),
+ rabbit_queue_index:flush(Qi4)
+ end),
+
+ %% c) just fill up several segments of all pubs, then +dels, then +acks
+ with_empty_test_queue(
+ fun (Qi0, _QName) ->
+ {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1),
+ Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2),
+ rabbit_queue_index:flush(Qi3)
+ end),
+
+ %% d) get messages in all states to a segment, then flush, then do
+ %% the same again, don't flush and read. This will hit all
+ %% possibilities in combining the segment with the journal.
+ with_empty_test_queue(
+ fun (Qi0, _QName) ->
+ {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7],
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
+ Qi3 = rabbit_queue_index:ack([0], Qi2),
+ Qi4 = rabbit_queue_index:flush(Qi3),
+ {Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4),
+ Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
+ Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
+ {[], Qi8} = rabbit_queue_index:read(0, 4, Qi7),
+ {ReadD, Qi9} = rabbit_queue_index:read(4, 7, Qi8),
+ ok = verify_read_with_published(true, false, ReadD,
+ [Four, Five, Six]),
+ {ReadE, Qi10} = rabbit_queue_index:read(7, 9, Qi9),
+ ok = verify_read_with_published(false, false, ReadE,
+ [Seven, Eight]),
+ Qi10
+ end),
+
+ %% e) as for (d), but use terminate instead of read, which will
+ %% exercise journal_minus_segment, not segment_plus_journal.
+ with_empty_test_queue(
+ fun (Qi0, QName) ->
+ {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7],
+ true, Qi0),
+ Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
+ Qi3 = rabbit_queue_index:ack([0], Qi2),
+ {5, 50, Qi4} = restart_test_queue(Qi3, QName),
+ {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4),
+ Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
+ Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
+ {5, 50, Qi8} = restart_test_queue(Qi7, QName),
+ Qi8
+ end),
+
+ ok = rabbit_variable_queue:stop(?VHOST),
+ {ok, _} = rabbit_variable_queue:start(?VHOST, []),
+
+ passed.
+
+bq_queue_index_props(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_queue_index_props1, [Config]).
+
+bq_queue_index_props1(_Config) ->
+ with_empty_test_queue(
+ fun(Qi0, _QName) ->
+ MsgId = rabbit_guid:gen(),
+ Props = #message_properties{expiry=12345, size = 10},
+ Qi1 = rabbit_queue_index:publish(
+ MsgId, 1, Props, true, infinity, Qi0),
+ {[{MsgId, 1, Props, _, _}], Qi2} =
+ rabbit_queue_index:read(1, 2, Qi1),
+ Qi2
+ end),
+
+ ok = rabbit_variable_queue:stop(?VHOST),
+ {ok, _} = rabbit_variable_queue:start(?VHOST, []),
+
+ passed.
+
+bq_variable_queue_delete_msg_store_files_callback(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_variable_queue_delete_msg_store_files_callback1, [Config]).
+
+bq_variable_queue_delete_msg_store_files_callback1(Config) ->
+ ok = restart_msg_store_empty(),
+ QName0 = queue_name(Config, <<"bq_variable_queue_delete_msg_store_files_callback-q">>),
+ {new, Q} = rabbit_amqqueue:declare(QName0, true, false, [], none, <<"acting-user">>),
+ QName = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ Payload = <<0:8388608>>, %% 1MB
+ Count = 30,
+ QTState = publish_and_confirm(Q, Payload, Count),
+
+ rabbit_amqqueue:set_ram_duration_target(QPid, 0),
+
+ {ok, Limiter} = rabbit_limiter:start_link(no_id),
+
+ CountMinusOne = Count - 1,
+ {ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}, _} =
+ rabbit_amqqueue:basic_get(Q, true, Limiter,
+ <<"bq_variable_queue_delete_msg_store_files_callback1">>,
+ QTState),
+ {ok, CountMinusOne} = rabbit_amqqueue:purge(Q),
+
+ %% give the queue a second to receive the close_fds callback msg
+ timer:sleep(1000),
+
+ rabbit_amqqueue:delete(Q, false, false, <<"acting-user">>),
+ passed.
+
+bq_queue_recover(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_queue_recover1, [Config]).
+
+bq_queue_recover1(Config) ->
+ Count = 2 * rabbit_queue_index:next_segment_boundary(0),
+ QName0 = queue_name(Config, <<"bq_queue_recover-q">>),
+ {new, Q} = rabbit_amqqueue:declare(QName0, true, false, [], none, <<"acting-user">>),
+ QName = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ QT = publish_and_confirm(Q, <<>>, Count),
+ SupPid = get_queue_sup_pid(Q),
+ true = is_pid(SupPid),
+ exit(SupPid, kill),
+ exit(QPid, kill),
+ MRef = erlang:monitor(process, QPid),
+ receive {'DOWN', MRef, process, QPid, _Info} -> ok
+ after 10000 -> exit(timeout_waiting_for_queue_death)
+ end,
+ rabbit_amqqueue:stop(?VHOST),
+ {Recovered, []} = rabbit_amqqueue:recover(?VHOST),
+ rabbit_amqqueue:start(Recovered),
+ {ok, Limiter} = rabbit_limiter:start_link(no_id),
+ rabbit_amqqueue:with_or_die(
+ QName,
+ fun (Q1) when ?is_amqqueue(Q1) ->
+ QPid1 = amqqueue:get_pid(Q1),
+ CountMinusOne = Count - 1,
+ {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}, _} =
+ rabbit_amqqueue:basic_get(Q1, false, Limiter,
+ <<"bq_queue_recover1">>, QT),
+ exit(QPid1, shutdown),
+ VQ1 = variable_queue_init(Q, true),
+ {{_Msg1, true, _AckTag1}, VQ2} =
+ rabbit_variable_queue:fetch(true, VQ1),
+ CountMinusOne = rabbit_variable_queue:len(VQ2),
+ _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2),
+ ok = rabbit_amqqueue:internal_delete(QName, <<"acting-user">>)
+ end),
+ passed.
+
+%% Return the PID of the given queue's supervisor.
+get_queue_sup_pid(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ VHost = QName#resource.virtual_host,
+ {ok, AmqSup} = rabbit_amqqueue_sup_sup:find_for_vhost(VHost, node(QPid)),
+ Sups = supervisor:which_children(AmqSup),
+ get_queue_sup_pid(Sups, QPid).
+
+get_queue_sup_pid([{_, SupPid, _, _} | Rest], QueuePid) ->
+ WorkerPids = [Pid || {_, Pid, _, _} <- supervisor:which_children(SupPid)],
+ case lists:member(QueuePid, WorkerPids) of
+ true -> SupPid;
+ false -> get_queue_sup_pid(Rest, QueuePid)
+ end;
+get_queue_sup_pid([], _QueuePid) ->
+ undefined.
+
+variable_queue_dynamic_duration_change(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_dynamic_duration_change1, [Config]).
+
+variable_queue_dynamic_duration_change1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_dynamic_duration_change2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_dynamic_duration_change2(VQ0, _QName) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+
+ %% start by sending in a couple of segments worth
+ Len = 2*SegmentSize,
+ VQ1 = variable_queue_publish(false, Len, VQ0),
+ %% squeeze and relax queue
+ Churn = Len div 32,
+ VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
+
+ {Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
+ VQ7 = lists:foldl(
+ fun (Duration1, VQ4) ->
+ {_Duration, VQ5} = rabbit_variable_queue:ram_duration(VQ4),
+ VQ6 = variable_queue_set_ram_duration_target(
+ Duration1, VQ5),
+ publish_fetch_and_ack(Churn, Len, VQ6)
+ end, VQ3, [Duration / 4, 0, Duration / 4, infinity]),
+
+ %% drain
+ {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7),
+ {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags, VQ8),
+ {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
+
+ VQ10.
+
+variable_queue_partial_segments_delta_thing(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_partial_segments_delta_thing1, [Config]).
+
+variable_queue_partial_segments_delta_thing1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_partial_segments_delta_thing2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_partial_segments_delta_thing2(VQ0, _QName) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+ HalfSegment = SegmentSize div 2,
+ OneAndAHalfSegment = SegmentSize + HalfSegment,
+ VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0),
+ {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1),
+ VQ3 = check_variable_queue_status(
+ variable_queue_set_ram_duration_target(0, VQ2),
+ %% one segment in q3, and half a segment in delta
+ [{delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
+ {q3, SegmentSize},
+ {len, SegmentSize + HalfSegment}]),
+ VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
+ VQ5 = check_variable_queue_status(
+ variable_queue_publish(true, 1, VQ4),
+ %% one alpha, but it's in the same segment as the deltas
+ [{q1, 1},
+ {delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
+ {q3, SegmentSize},
+ {len, SegmentSize + HalfSegment + 1}]),
+ {VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false,
+ SegmentSize + HalfSegment + 1, VQ5),
+ VQ7 = check_variable_queue_status(
+ VQ6,
+ %% the half segment should now be in q3
+ [{q1, 1},
+ {delta, {delta, undefined, 0, undefined}},
+ {q3, HalfSegment},
+ {len, HalfSegment + 1}]),
+ {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false,
+ HalfSegment + 1, VQ7),
+ {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8),
+ %% should be empty now
+ {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
+ VQ10.
+
+variable_queue_all_the_bits_not_covered_elsewhere_A(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_A1, [Config]).
+
+variable_queue_all_the_bits_not_covered_elsewhere_A1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_all_the_bits_not_covered_elsewhere_A2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_all_the_bits_not_covered_elsewhere_A2(VQ0, QName) ->
+ Count = 2 * rabbit_queue_index:next_segment_boundary(0),
+ VQ1 = variable_queue_publish(true, Count, VQ0),
+ VQ2 = variable_queue_publish(false, Count, VQ1),
+ VQ3 = variable_queue_set_ram_duration_target(0, VQ2),
+ {VQ4, _AckTags} = variable_queue_fetch(Count, true, false,
+ Count + Count, VQ3),
+ {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false,
+ Count, VQ4),
+ _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
+ VQ7 = variable_queue_init(test_amqqueue(QName, true), true),
+ {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7),
+ Count1 = rabbit_variable_queue:len(VQ8),
+ VQ9 = variable_queue_publish(false, 1, VQ8),
+ VQ10 = variable_queue_set_ram_duration_target(0, VQ9),
+ {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10),
+ {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11),
+ VQ12.
+
+variable_queue_all_the_bits_not_covered_elsewhere_B(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_B1, [Config]).
+
+variable_queue_all_the_bits_not_covered_elsewhere_B1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_all_the_bits_not_covered_elsewhere_B2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_all_the_bits_not_covered_elsewhere_B2(VQ0, QName) ->
+ VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+ VQ2 = variable_queue_publish(false, 4, VQ1),
+ {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2),
+ {_Guids, VQ4} =
+ rabbit_variable_queue:requeue(AckTags, VQ3),
+ VQ5 = rabbit_variable_queue:timeout(VQ4),
+ _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
+ VQ7 = variable_queue_init(test_amqqueue(QName, true), true),
+ {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7),
+ VQ8.
+
+variable_queue_drop(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_drop1, [Config]).
+
+variable_queue_drop1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_drop2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_drop2(VQ0, _QName) ->
+ %% start by sending in a message
+ VQ1 = variable_queue_publish(false, 1, VQ0),
+ %% drop message with AckRequired = true
+ {{MsgId, AckTag}, VQ2} = rabbit_variable_queue:drop(true, VQ1),
+ true = rabbit_variable_queue:is_empty(VQ2),
+ true = AckTag =/= undefined,
+ %% drop again -> empty
+ {empty, VQ3} = rabbit_variable_queue:drop(false, VQ2),
+ %% requeue
+ {[MsgId], VQ4} = rabbit_variable_queue:requeue([AckTag], VQ3),
+ %% drop message with AckRequired = false
+ {{MsgId, undefined}, VQ5} = rabbit_variable_queue:drop(false, VQ4),
+ true = rabbit_variable_queue:is_empty(VQ5),
+ VQ5.
+
+variable_queue_fold_msg_on_disk(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_fold_msg_on_disk1, [Config]).
+
+variable_queue_fold_msg_on_disk1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_fold_msg_on_disk2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_fold_msg_on_disk2(VQ0, _QName) ->
+ VQ1 = variable_queue_publish(true, 1, VQ0),
+ {VQ2, AckTags} = variable_queue_fetch(1, true, false, 1, VQ1),
+ {ok, VQ3} = rabbit_variable_queue:ackfold(fun (_M, _A, ok) -> ok end,
+ ok, VQ2, AckTags),
+ VQ3.
+
+variable_queue_dropfetchwhile(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_dropfetchwhile1, [Config]).
+
+variable_queue_dropfetchwhile1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_dropfetchwhile2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_dropfetchwhile2(VQ0, _QName) ->
+ Count = 10,
+
+ %% add messages with sequential expiry
+ VQ1 = variable_queue_publish(
+ false, 1, Count,
+ fun (N, Props) -> Props#message_properties{expiry = N} end,
+ fun erlang:term_to_binary/1, VQ0),
+
+ %% fetch the first 5 messages
+ {#message_properties{expiry = 6}, {Msgs, AckTags}, VQ2} =
+ rabbit_variable_queue:fetchwhile(
+ fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end,
+ fun (Msg, AckTag, {MsgAcc, AckAcc}) ->
+ {[Msg | MsgAcc], [AckTag | AckAcc]}
+ end, {[], []}, VQ1),
+ true = lists:seq(1, 5) == [msg2int(M) || M <- lists:reverse(Msgs)],
+
+ %% requeue them
+ {_MsgIds, VQ3} = rabbit_variable_queue:requeue(AckTags, VQ2),
+
+ %% drop the first 5 messages
+ {#message_properties{expiry = 6}, VQ4} =
+ rabbit_variable_queue:dropwhile(
+ fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ3),
+
+ %% fetch 5
+ VQ5 = lists:foldl(fun (N, VQN) ->
+ {{Msg, _, _}, VQM} =
+ rabbit_variable_queue:fetch(false, VQN),
+ true = msg2int(Msg) == N,
+ VQM
+ end, VQ4, lists:seq(6, Count)),
+
+ %% should be empty now
+ true = rabbit_variable_queue:is_empty(VQ5),
+
+ VQ5.
+
+variable_queue_dropwhile_varying_ram_duration(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_dropwhile_varying_ram_duration1, [Config]).
+
+variable_queue_dropwhile_varying_ram_duration1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_dropwhile_varying_ram_duration2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_dropwhile_varying_ram_duration2(VQ0, _QName) ->
+ test_dropfetchwhile_varying_ram_duration(
+ fun (VQ1) ->
+ {_, VQ2} = rabbit_variable_queue:dropwhile(
+ fun (_) -> false end, VQ1),
+ VQ2
+ end, VQ0).
+
+variable_queue_fetchwhile_varying_ram_duration(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_fetchwhile_varying_ram_duration1, [Config]).
+
+variable_queue_fetchwhile_varying_ram_duration1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_fetchwhile_varying_ram_duration2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_fetchwhile_varying_ram_duration2(VQ0, _QName) ->
+ test_dropfetchwhile_varying_ram_duration(
+ fun (VQ1) ->
+ {_, ok, VQ2} = rabbit_variable_queue:fetchwhile(
+ fun (_) -> false end,
+ fun (_, _, A) -> A end,
+ ok, VQ1),
+ VQ2
+ end, VQ0).
+
+test_dropfetchwhile_varying_ram_duration(Fun, VQ0) ->
+ VQ1 = variable_queue_publish(false, 1, VQ0),
+ VQ2 = variable_queue_set_ram_duration_target(0, VQ1),
+ VQ3 = Fun(VQ2),
+ VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
+ VQ5 = variable_queue_publish(false, 1, VQ4),
+ VQ6 = Fun(VQ5),
+ VQ6.
+
+variable_queue_ack_limiting(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_ack_limiting1, [Config]).
+
+variable_queue_ack_limiting1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_ack_limiting2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_ack_limiting2(VQ0, _Config) ->
+ %% start by sending in a bunch of messages
+ Len = 1024,
+ VQ1 = variable_queue_publish(false, Len, VQ0),
+
+ %% squeeze and relax queue
+ Churn = Len div 32,
+ VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
+
+ %% update stats for duration
+ {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
+
+ %% fetch half the messages
+ {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3),
+
+ VQ5 = check_variable_queue_status(
+ VQ4, [{len, Len div 2},
+ {messages_unacknowledged_ram, Len div 2},
+ {messages_ready_ram, Len div 2},
+ {messages_ram, Len}]),
+
+ %% ensure all acks go to disk on 0 duration target
+ VQ6 = check_variable_queue_status(
+ variable_queue_set_ram_duration_target(0, VQ5),
+ [{len, Len div 2},
+ {target_ram_count, 0},
+ {messages_unacknowledged_ram, 0},
+ {messages_ready_ram, 0},
+ {messages_ram, 0}]),
+
+ VQ6.
+
+variable_queue_purge(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_purge1, [Config]).
+
+variable_queue_purge1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_purge2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_purge2(VQ0, _Config) ->
+ LenDepth = fun (VQ) ->
+ {rabbit_variable_queue:len(VQ),
+ rabbit_variable_queue:depth(VQ)}
+ end,
+ VQ1 = variable_queue_publish(false, 10, VQ0),
+ {VQ2, Acks} = variable_queue_fetch(6, false, false, 10, VQ1),
+ {4, VQ3} = rabbit_variable_queue:purge(VQ2),
+ {0, 6} = LenDepth(VQ3),
+ {_, VQ4} = rabbit_variable_queue:requeue(lists:sublist(Acks, 2), VQ3),
+ {2, 6} = LenDepth(VQ4),
+ VQ5 = rabbit_variable_queue:purge_acks(VQ4),
+ {2, 2} = LenDepth(VQ5),
+ VQ5.
+
+variable_queue_requeue(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_requeue1, [Config]).
+
+variable_queue_requeue1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_requeue2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_requeue2(VQ0, _Config) ->
+ {_PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
+ variable_queue_with_holes(VQ0),
+ Msgs =
+ lists:zip(RequeuedMsgs,
+ lists:duplicate(length(RequeuedMsgs), true)) ++
+ lists:zip(FreshMsgs,
+ lists:duplicate(length(FreshMsgs), false)),
+ VQ2 = lists:foldl(fun ({I, Requeued}, VQa) ->
+ {{M, MRequeued, _}, VQb} =
+ rabbit_variable_queue:fetch(true, VQa),
+ Requeued = MRequeued, %% assertion
+ I = msg2int(M), %% assertion
+ VQb
+ end, VQ1, Msgs),
+ {empty, VQ3} = rabbit_variable_queue:fetch(true, VQ2),
+ VQ3.
+
+%% requeue from ram_pending_ack into q3, move to delta and then empty queue
+variable_queue_requeue_ram_beta(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_requeue_ram_beta1, [Config]).
+
+variable_queue_requeue_ram_beta1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_requeue_ram_beta2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_requeue_ram_beta2(VQ0, _Config) ->
+ Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
+ VQ1 = variable_queue_publish(false, Count, VQ0),
+ {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1),
+ {Back, Front} = lists:split(Count div 2, AcksR),
+ {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2),
+ VQ4 = variable_queue_set_ram_duration_target(0, VQ3),
+ {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4),
+ VQ6 = requeue_one_by_one(Front, VQ5),
+ {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6),
+ {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7),
+ VQ8.
+
+variable_queue_fold(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_fold1, [Config]).
+
+variable_queue_fold1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_fold2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_fold2(VQ0, _Config) ->
+ {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
+ variable_queue_with_holes(VQ0),
+ Count = rabbit_variable_queue:depth(VQ1),
+ Msgs = lists:sort(PendingMsgs ++ RequeuedMsgs ++ FreshMsgs),
+ lists:foldl(fun (Cut, VQ2) ->
+ test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ2)
+ end, VQ1, [0, 1, 2, Count div 2,
+ Count - 1, Count, Count + 1, Count * 2]).
+
+test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) ->
+ {Acc, VQ1} = rabbit_variable_queue:fold(
+ fun (M, _, Pending, A) ->
+ MInt = msg2int(M),
+ Pending = lists:member(MInt, PendingMsgs), %% assert
+ case MInt =< Cut of
+ true -> {cont, [MInt | A]};
+ false -> {stop, A}
+ end
+ end, [], VQ0),
+ Expected = lists:takewhile(fun (I) -> I =< Cut end, Msgs),
+ Expected = lists:reverse(Acc), %% assertion
+ VQ1.
+
+variable_queue_batch_publish(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_batch_publish1, [Config]).
+
+variable_queue_batch_publish1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_batch_publish2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_batch_publish2(VQ, _Config) ->
+ Count = 10,
+ VQ1 = variable_queue_batch_publish(true, Count, VQ),
+ Count = rabbit_variable_queue:len(VQ1),
+ VQ1.
+
+variable_queue_batch_publish_delivered(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_batch_publish_delivered1, [Config]).
+
+variable_queue_batch_publish_delivered1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_batch_publish_delivered2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_batch_publish_delivered2(VQ, _Config) ->
+ Count = 10,
+ VQ1 = variable_queue_batch_publish_delivered(true, Count, VQ),
+ Count = rabbit_variable_queue:depth(VQ1),
+ VQ1.
+
+%% same as variable_queue_requeue_ram_beta but randomly changing
+%% the queue mode after every step.
+variable_queue_mode_change(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_mode_change1, [Config]).
+
+variable_queue_mode_change1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_mode_change2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_mode_change2(VQ0, _Config) ->
+ Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
+ VQ1 = variable_queue_publish(false, Count, VQ0),
+ VQ2 = maybe_switch_queue_mode(VQ1),
+ {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+ VQ4 = maybe_switch_queue_mode(VQ3),
+ {Back, Front} = lists:split(Count div 2, AcksR),
+ {_, VQ5} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ4),
+ VQ6 = maybe_switch_queue_mode(VQ5),
+ VQ7 = variable_queue_set_ram_duration_target(0, VQ6),
+ VQ8 = maybe_switch_queue_mode(VQ7),
+ {_, VQ9} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ8),
+ VQ10 = maybe_switch_queue_mode(VQ9),
+ VQ11 = requeue_one_by_one(Front, VQ10),
+ VQ12 = maybe_switch_queue_mode(VQ11),
+ {VQ13, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ12),
+ VQ14 = maybe_switch_queue_mode(VQ13),
+ {_, VQ15} = rabbit_variable_queue:ack(AcksAll, VQ14),
+ VQ16 = maybe_switch_queue_mode(VQ15),
+ VQ16.
+
+maybe_switch_queue_mode(VQ) ->
+ Mode = random_queue_mode(),
+ set_queue_mode(Mode, VQ).
+
+random_queue_mode() ->
+ Modes = [lazy, default],
+ lists:nth(rand:uniform(length(Modes)), Modes).
+
+pub_res({_, VQS}) ->
+ VQS;
+pub_res(VQS) ->
+ VQS.
+
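+%% Build the per-message tuples consumed by
+%% rabbit_variable_queue:batch_publish/4 and batch_publish_delivered/4
+%% (see variable_queue_batch_publish0/8 below).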
+make_publish(IsPersistent, PayloadFun, PropFun, N) ->
+ {rabbit_basic:message(
+ rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+ true -> 2;
+ false -> 1
+ end},
+ PayloadFun(N)),
+ PropFun(N, #message_properties{size = 10}),
+ false}.
+
+make_publish_delivered(IsPersistent, PayloadFun, PropFun, N) ->
+ {rabbit_basic:message(
+ rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+ true -> 2;
+ false -> 1
+ end},
+ PayloadFun(N)),
+ PropFun(N, #message_properties{size = 10})}.
+
+queue_name(Config, Name) ->
+ Name1 = iolist_to_binary(rabbit_ct_helpers:config_to_testcase_name(Config, Name)),
+ queue_name(Name1).
+
+queue_name(Name) ->
+ rabbit_misc:r(<<"/">>, queue, Name).
+
+test_queue() ->
+ queue_name(rabbit_guid:gen()).
+
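+%% Recover (or create) the queue index for QName, using a throw-away
+%% persistent msg store client for the "is this message on disk?" check.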
+init_test_queue(QName) ->
+ PRef = rabbit_guid:gen(),
+ PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef),
+ Res = rabbit_queue_index:recover(
+ QName, [], false,
+ fun (MsgId) ->
+ rabbit_msg_store:contains(MsgId, PersistentClient)
+ end,
+ fun nop/1, fun nop/1),
+ ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient),
+ Res.
+
+restart_test_queue(Qi, QName) ->
+ _ = rabbit_queue_index:terminate(?VHOST, [], Qi),
+ ok = rabbit_variable_queue:stop(?VHOST),
+ {ok, _} = rabbit_variable_queue:start(?VHOST, [QName]),
+ init_test_queue(QName).
+
+empty_test_queue(QName) ->
+ ok = rabbit_variable_queue:stop(?VHOST),
+ {ok, _} = rabbit_variable_queue:start(?VHOST, []),
+ {0, 0, Qi} = init_test_queue(QName),
+ _ = rabbit_queue_index:delete_and_terminate(Qi),
+ ok.
+
+unin_empty_test_queue(QName) ->
+ {0, 0, Qi} = init_test_queue(QName),
+ _ = rabbit_queue_index:delete_and_terminate(Qi),
+ ok.
+
+with_empty_test_queue(Fun) ->
+ QName = test_queue(),
+ ok = empty_test_queue(QName),
+ {0, 0, Qi} = init_test_queue(QName),
+ rabbit_queue_index:delete_and_terminate(Fun(Qi, QName)).
+
+restart_app() ->
+ rabbit:stop(),
+ rabbit:start().
+
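+%% Publish SeqIds into the queue index and write a matching message into
+%% the persistent or transient msg store; returns the updated index state
+%% and the {SeqId, MsgId} pairs, most recently published first.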
+queue_index_publish(SeqIds, Persistent, Qi) ->
+ Ref = rabbit_guid:gen(),
+ MsgStore = case Persistent of
+ true -> ?PERSISTENT_MSG_STORE;
+ false -> ?TRANSIENT_MSG_STORE
+ end,
+ MSCState = msg_store_client_init(MsgStore, Ref),
+ {A, B = [{_SeqId, LastMsgIdWritten} | _]} =
+ lists:foldl(
+ fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) ->
+ MsgId = rabbit_guid:gen(),
+ QiM = rabbit_queue_index:publish(
+ MsgId, SeqId, #message_properties{size = 10},
+ Persistent, infinity, QiN),
+ ok = rabbit_msg_store:write(MsgId, MsgId, MSCState),
+ {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]}
+ end, {Qi, []}, SeqIds),
+ %% do this just to force all of the publishes through to the msg_store:
+ true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState),
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ {A, B}.
+
+verify_read_with_published(_Delivered, _Persistent, [], _) ->
+ ok;
+verify_read_with_published(Delivered, Persistent,
+ [{MsgId, SeqId, _Props, Persistent, Delivered}|Read],
+ [{SeqId, MsgId}|Published]) ->
+ verify_read_with_published(Delivered, Persistent, Read, Published);
+verify_read_with_published(_Delivered, _Persistent, _Read, _Published) ->
+ ko.
+
+nop(_) -> ok.
+nop(_, _) -> ok.
+
+msg_store_client_init(MsgStore, Ref) ->
+ rabbit_vhost_msg_store:client_init(?VHOST, MsgStore, Ref, undefined, undefined).
+
+variable_queue_init(Q, Recover) ->
+ rabbit_variable_queue:init(
+ Q, case Recover of
+ true -> non_clean_shutdown;
+ false -> new
+ end, fun nop/2, fun nop/2, fun nop/1, fun nop/1).
+
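+%% Publish Count copies of Payload through rabbit_queue_type with
+%% confirms enabled, then block until every sequence number has been
+%% confirmed back to this process.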
+publish_and_confirm(Q, Payload, Count) ->
+ Seqs = lists:seq(1, Count),
+ QTState0 = rabbit_queue_type:new(Q, rabbit_queue_type:init()),
+ QTState =
+ lists:foldl(
+ fun (Seq, Acc0) ->
+ Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = 2},
+ Payload),
+ Delivery = #delivery{mandatory = false, sender = self(),
+ confirm = true, message = Msg, msg_seq_no = Seq,
+ flow = noflow},
+ {ok, Acc, _Actions} = rabbit_queue_type:deliver([Q], Delivery, Acc0),
+ Acc
+ end, QTState0, Seqs),
+ wait_for_confirms(gb_sets:from_list(Seqs)),
+ QTState.
+
+wait_for_confirms(Unconfirmed) ->
+ case gb_sets:is_empty(Unconfirmed) of
+ true -> ok;
+ false ->
+ receive
+ {'$gen_cast',
+ {queue_event, _QName, {confirm, Confirmed, _}}} ->
+ wait_for_confirms(
+ rabbit_misc:gb_sets_difference(
+ Unconfirmed, gb_sets:from_list(Confirmed)));
+ {'$gen_cast', {confirm, Confirmed, _}} ->
+ wait_for_confirms(
+ rabbit_misc:gb_sets_difference(
+ Unconfirmed, gb_sets:from_list(Confirmed)))
+ after ?TIMEOUT ->
+ flush(),
+ exit(timeout_waiting_for_confirm)
+ end
+ end.
+
+with_fresh_variable_queue(Fun, Mode) ->
+ Ref = make_ref(),
+ Me = self(),
+ %% Run in a separate process since rabbit_msg_store will send
+ %% bump_credit messages and we want to ignore them
+ spawn_link(fun() ->
+ QName = test_queue(),
+ ok = unin_empty_test_queue(QName),
+ VQ = variable_queue_init(test_amqqueue(QName, true), false),
+ S0 = variable_queue_status(VQ),
+ assert_props(S0, [{q1, 0}, {q2, 0},
+ {delta,
+ {delta, undefined, 0, undefined}},
+ {q3, 0}, {q4, 0},
+ {len, 0}]),
+ VQ1 = set_queue_mode(Mode, VQ),
+ try
+ _ = rabbit_variable_queue:delete_and_terminate(
+ shutdown, Fun(VQ1, QName)),
+ Me ! Ref
+ catch
+ Type:Error:Stacktrace ->
+ Me ! {Ref, Type, Error, Stacktrace}
+ end
+ end),
+ receive
+ Ref -> ok;
+ {Ref, Type, Error, ST} -> exit({Type, Error, ST})
+ end,
+ passed.
+
+set_queue_mode(Mode, VQ) ->
+ VQ1 = rabbit_variable_queue:set_queue_mode(Mode, VQ),
+ S1 = variable_queue_status(VQ1),
+ assert_props(S1, [{mode, Mode}]),
+ VQ1.
+
+variable_queue_publish(IsPersistent, Count, VQ) ->
+ variable_queue_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_publish(IsPersistent, Count, PropFun, VQ) ->
+ variable_queue_publish(IsPersistent, 1, Count, PropFun,
+ fun (_N) -> <<>> end, VQ).
+
+variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+ variable_queue_wait_for_shuffling_end(
+ lists:foldl(
+ fun (N, VQN) ->
+ rabbit_variable_queue:publish(
+ rabbit_basic:message(
+ rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+ true -> 2;
+ false -> 1
+ end},
+ PayloadFun(N)),
+ PropFun(N, #message_properties{size = 10}),
+ false, self(), noflow, VQN)
+ end, VQ, lists:seq(Start, Start + Count - 1))).
+
+variable_queue_batch_publish(IsPersistent, Count, VQ) ->
+ variable_queue_batch_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_batch_publish(IsPersistent, Count, PropFun, VQ) ->
+ variable_queue_batch_publish(IsPersistent, 1, Count, PropFun,
+ fun (_N) -> <<>> end, VQ).
+
+variable_queue_batch_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+ variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
+ PayloadFun, fun make_publish/4,
+ fun rabbit_variable_queue:batch_publish/4,
+ VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Count, VQ) ->
+ variable_queue_batch_publish_delivered(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Count, PropFun, VQ) ->
+ variable_queue_batch_publish_delivered(IsPersistent, 1, Count, PropFun,
+ fun (_N) -> <<>> end, VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+ variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
+ PayloadFun, fun make_publish_delivered/4,
+ fun rabbit_variable_queue:batch_publish_delivered/4,
+ VQ).
+
+variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun, PayloadFun,
+ MakePubFun, PubFun, VQ) ->
+ Publishes =
+ [MakePubFun(IsPersistent, PayloadFun, PropFun, N)
+ || N <- lists:seq(Start, Start + Count - 1)],
+ Res = PubFun(Publishes, self(), noflow, VQ),
+ VQ1 = pub_res(Res),
+ variable_queue_wait_for_shuffling_end(VQ1).
+
+variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) ->
+ lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
+ Rem = Len - N,
+ {{#basic_message { is_persistent = IsPersistent },
+ IsDelivered, AckTagN}, VQM} =
+ rabbit_variable_queue:fetch(true, VQN),
+ Rem = rabbit_variable_queue:len(VQM),
+ {VQM, [AckTagN | AckTagsAcc]}
+ end, {VQ, []}, lists:seq(1, Count)).
+
+test_amqqueue(QName, Durable) ->
+ rabbit_amqqueue:pseudo_queue(QName, self(), Durable).
+
+assert_prop(List, Prop, Value) ->
+ case proplists:get_value(Prop, List) of
+ Value -> ok;
+ _ -> {exit, Prop, exp, Value, List}
+ end.
+
+assert_props(List, PropVals) ->
+ [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals].
+
+variable_queue_set_ram_duration_target(Duration, VQ) ->
+ variable_queue_wait_for_shuffling_end(
+ rabbit_variable_queue:set_ram_duration_target(Duration, VQ)).
+
+publish_fetch_and_ack(0, _Len, VQ0) ->
+ VQ0;
+publish_fetch_and_ack(N, Len, VQ0) ->
+ VQ1 = variable_queue_publish(false, 1, VQ0),
+ {{_Msg, false, AckTag}, VQ2} = rabbit_variable_queue:fetch(true, VQ1),
+ Len = rabbit_variable_queue:len(VQ2),
+ {_Guids, VQ3} = rabbit_variable_queue:ack([AckTag], VQ2),
+ publish_fetch_and_ack(N-1, Len, VQ3).
+
+variable_queue_status(VQ) ->
+ Keys = rabbit_backing_queue:info_keys() -- [backing_queue_status],
+ [{K, rabbit_variable_queue:info(K, VQ)} || K <- Keys] ++
+ rabbit_variable_queue:info(backing_queue_status, VQ).
+
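+%% Drain any credit_flow back-pressure: while blocked, consume
+%% bump_credit messages and resume the queue until the flow is unblocked.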
+variable_queue_wait_for_shuffling_end(VQ) ->
+ case credit_flow:blocked() of
+ false -> VQ;
+ true ->
+ receive
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg),
+ variable_queue_wait_for_shuffling_end(
+ rabbit_variable_queue:resume(VQ))
+ end
+ end.
+
+msg2int(#basic_message{content = #content{ payload_fragments_rev = P}}) ->
+ binary_to_term(list_to_binary(lists:reverse(P))).
+
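+%% Keep every Interval-th {AckTag, SeqNo} pair (shifted by Rem); used by
+%% variable_queue_with_holes/1 to punch regular holes into the queue.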
+ack_subset(AckSeqs, Interval, Rem) ->
+ lists:filter(fun ({_Ack, N}) -> (N + Rem) rem Interval == 0 end, AckSeqs).
+
+requeue_one_by_one(Acks, VQ) ->
+ lists:foldl(fun (AckTag, VQN) ->
+ {_MsgId, VQM} = rabbit_variable_queue:requeue(
+ [AckTag], VQN),
+ VQM
+ end, VQ, Acks).
+
+%% Create a vq with messages in q1, delta, and q3, and holes (in the
+%% form of pending acks) in the latter two.
+variable_queue_with_holes(VQ0) ->
+ Interval = 2048, %% should match vq:IO_BATCH_SIZE
+ Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2 * Interval,
+ Seq = lists:seq(1, Count),
+ VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+ VQ2 = variable_queue_publish(
+ false, 1, Count,
+ fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ1),
+ {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+ Acks = lists:reverse(AcksR),
+ AckSeqs = lists:zip(Acks, Seq),
+ [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] =
+ [lists:unzip(ack_subset(AckSeqs, Interval, I)) || I <- [0, 1, 2]],
+ %% we requeue in three phases in order to exercise requeuing logic
+ %% in various vq states
+ {_MsgIds, VQ4} = rabbit_variable_queue:requeue(
+ Acks -- (Subset1 ++ Subset2 ++ Subset3), VQ3),
+ VQ5 = requeue_one_by_one(Subset1, VQ4),
+ %% by now we have some messages (and holes) in delta
+ VQ6 = requeue_one_by_one(Subset2, VQ5),
+ VQ7 = variable_queue_set_ram_duration_target(infinity, VQ6),
+ %% add the q1 tail
+ VQ8 = variable_queue_publish(
+ true, Count + 1, Interval,
+ fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7),
+ %% assertions
+ Status = variable_queue_status(VQ8),
+
+ vq_with_holes_assertions(VQ8, proplists:get_value(mode, Status)),
+ Depth = Count + Interval,
+ Depth = rabbit_variable_queue:depth(VQ8),
+ Len = Depth - length(Subset3),
+ Len = rabbit_variable_queue:len(VQ8),
+
+ {Seq3, Seq -- Seq3, lists:seq(Count + 1, Count + Interval), VQ8}.
+
+vq_with_holes_assertions(VQ, default) ->
+ [false =
+ case V of
+ {delta, _, 0, _} -> true;
+ 0 -> true;
+ _ -> false
+ end || {K, V} <- variable_queue_status(VQ),
+ lists:member(K, [q1, delta, q3])];
+vq_with_holes_assertions(VQ, lazy) ->
+ [false =
+ case V of
+ {delta, _, 0, _} -> true;
+ _ -> false
+ end || {K, V} <- variable_queue_status(VQ),
+ lists:member(K, [delta])].
+
+check_variable_queue_status(VQ0, Props) ->
+ VQ1 = variable_queue_wait_for_shuffling_end(VQ0),
+ S = variable_queue_status(VQ1),
+ assert_props(S, Props),
+ VQ1.
+
+flush() ->
+ receive
+ Any ->
+ ct:pal("flush ~p", [Any]),
+ flush()
+ after 0 ->
+ ok
+ end.
diff --git a/deps/rabbit/test/channel_interceptor_SUITE.erl b/deps/rabbit/test/channel_interceptor_SUITE.erl
new file mode 100644
index 0000000000..e0a8050598
--- /dev/null
+++ b/deps/rabbit/test/channel_interceptor_SUITE.erl
@@ -0,0 +1,108 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(channel_interceptor_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ register_interceptor,
+ register_failing_interceptors
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+register_interceptor(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, register_interceptor1, [Config, dummy_interceptor]).
+
+register_interceptor1(Config, Interceptor) ->
+ PredefinedChannels = rabbit_channel:list(),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ QName = <<"register_interceptor-q">>,
+ amqp_channel:call(Ch1, #'queue.declare'{queue = QName}),
+
+ [ChannelProc] = rabbit_channel:list() -- PredefinedChannels,
+
+ [{interceptors, []}] = rabbit_channel:info(ChannelProc, [interceptors]),
+
+ check_send_receive(Ch1, QName, <<"bar">>, <<"bar">>),
+
+ ok = rabbit_registry:register(channel_interceptor,
+ <<"dummy interceptor">>,
+ Interceptor),
+ [{interceptors, [{Interceptor, undefined}]}] =
+ rabbit_channel:info(ChannelProc, [interceptors]),
+
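+ %% with the interceptor registered, the published payload is expected
+ %% to be rewritten, so basic.get returns an empty body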
+ check_send_receive(Ch1, QName, <<"bar">>, <<"">>),
+
+ ok = rabbit_registry:unregister(channel_interceptor,
+ <<"dummy interceptor">>),
+ [{interceptors, []}] = rabbit_channel:info(ChannelProc, [interceptors]),
+
+ check_send_receive(Ch1, QName, <<"bar">>, <<"bar">>),
+ passed.
+
+register_failing_interceptors(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, register_interceptor1, [Config, failing_dummy_interceptor]).
+
+check_send_receive(Ch1, QName, Send, Receive) ->
+ amqp_channel:call(Ch1,
+ #'basic.publish'{routing_key = QName},
+ #amqp_msg{payload = Send}),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Receive}} =
+ amqp_channel:call(Ch1, #'basic.get'{queue = QName,
+ no_ack = true}).
diff --git a/deps/rabbit/test/channel_operation_timeout_SUITE.erl b/deps/rabbit/test/channel_operation_timeout_SUITE.erl
new file mode 100644
index 0000000000..15e0188604
--- /dev/null
+++ b/deps/rabbit/test/channel_operation_timeout_SUITE.erl
@@ -0,0 +1,198 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(channel_operation_timeout_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("amqqueue.hrl").
+
+-compile([export_all]).
+
+-import(rabbit_misc, [pget/2]).
+
+-define(CONFIG, [cluster_ab]).
+-define(DEFAULT_VHOST, <<"/">>).
+-define(QRESOURCE(Q), rabbit_misc:r(?DEFAULT_VHOST, queue, Q)).
+-define(TIMEOUT_TEST_MSG, <<"timeout_test_msg!">>).
+-define(DELAY, 25).
+
+all() ->
+ [
+ notify_down_all
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 2,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+notify_down_all(Config) ->
+ Rabbit = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ RabbitCh = rabbit_ct_client_helpers:open_channel(Config, 0),
+ HareCh = rabbit_ct_client_helpers:open_channel(Config, 1),
+
+ ct:pal("one"),
+ %% success
+ set_channel_operation_timeout_config(Config, 1000),
+ configure_bq(Config),
+ QCfg0 = qconfig(RabbitCh, <<"q0">>, <<"ex0">>, true, false),
+ declare(QCfg0),
+ ct:pal("two"),
+ %% Testing rabbit_amqqueue:notify_down_all via rabbit_channel.
+ %% Consumer count = 0 after correct channel termination and
+ %% notification of queues via delegate:call/3
+ true = (0 =/= length(get_consumers(Config, Rabbit, ?DEFAULT_VHOST))),
+ rabbit_ct_client_helpers:close_channel(RabbitCh),
+ 0 = length(get_consumers(Config, Rabbit, ?DEFAULT_VHOST)),
+ false = is_process_alive(RabbitCh),
+ ct:pal("three"),
+
+ %% fail
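+ %% with a 10ms operation timeout and the test backing queue expected to
+ %% stall on ?TIMEOUT_TEST_MSG (see channel_operation_timeout_test_queue),
+ %% closing the channel must still terminate the channel process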
+ set_channel_operation_timeout_config(Config, 10),
+ QCfg2 = qconfig(HareCh, <<"q1">>, <<"ex1">>, true, false),
+ declare(QCfg2),
+ publish(QCfg2, ?TIMEOUT_TEST_MSG),
+ timer:sleep(?DELAY),
+ rabbit_ct_client_helpers:close_channel(HareCh),
+ timer:sleep(?DELAY),
+ false = is_process_alive(HareCh),
+
+ pass.
+
+%% -------------------------
+%% Internal helper functions
+%% -------------------------
+
+set_channel_operation_timeout_config(Config, Timeout) ->
+ [ok = Ret
+ || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+ application, set_env, [rabbit, channel_operation_timeout, Timeout])],
+ ok.
+
+set_channel_operation_backing_queue(Config) ->
+ [ok = Ret
+ || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+ application, set_env,
+ [rabbit, backing_queue_module, channel_operation_timeout_test_queue])],
+ ok.
+
+re_enable_priority_queue(Config) ->
+ [ok = Ret
+ || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+ rabbit_priority_queue, enable, [])],
+ ok.
+
+declare(QCfg) ->
+ QDeclare = #'queue.declare'{queue = Q = pget(name, QCfg), durable = true},
+ #'queue.declare_ok'{} = amqp_channel:call(Ch = pget(ch, QCfg), QDeclare),
+
+ ExDeclare = #'exchange.declare'{exchange = Ex = pget(ex, QCfg)},
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, ExDeclare),
+
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Ch, #'queue.bind'{queue = Q,
+ exchange = Ex,
+ routing_key = Q}),
+ maybe_subscribe(QCfg).
+
+maybe_subscribe(QCfg) ->
+ case pget(consume, QCfg) of
+ true ->
+ Sub = #'basic.consume'{queue = pget(name, QCfg)},
+ Ch = pget(ch, QCfg),
+ Del = pget(deliver, QCfg),
+ amqp_channel:subscribe(Ch, Sub,
+ spawn(fun() -> consume(Ch, Del) end));
+ _ -> ok
+ end.
+
+consume(_Ch, false) -> receive_nothing();
+consume(Ch, Deliver = true) ->
+ receive
+ {#'basic.deliver'{}, _Msg} ->
+ consume(Ch, Deliver)
+ end.
+
+publish(QCfg, Msg) ->
+ Publish = #'basic.publish'{exchange = pget(ex, QCfg),
+ routing_key = pget(name, QCfg)},
+ amqp_channel:call(pget(ch, QCfg), Publish,
+ #amqp_msg{payload = Msg}).
+
+get_consumers(Config, Node, VHost) when is_atom(Node),
+ is_binary(VHost) ->
+ rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_amqqueue, consumers_all, [VHost]).
+
+get_amqqueue(QName0, []) ->
+ throw({not_found, QName0});
+get_amqqueue(QName0, [Q | Rem]) when ?is_amqqueue(Q) ->
+ QName1 = amqqueue:get_name(Q),
+ compare_amqqueue(QName0, QName1, Q, Rem).
+
+compare_amqqueue(QName, QName, Q, _Rem) ->
+ Q;
+compare_amqqueue(QName, _, _, Rem) ->
+ get_amqqueue(QName, Rem).
+
+qconfig(Ch, Name, Ex, Consume, Deliver) ->
+ [{ch, Ch}, {name, Name}, {ex,Ex}, {consume, Consume}, {deliver, Deliver}].
+
+receive_nothing() ->
+ receive
+ after infinity -> void
+ end.
+
+unhandled_req(Fun) ->
+ try
+ Fun()
+ catch
+ exit:{{shutdown,{_, ?NOT_FOUND, _}}, _} -> ok;
+ _:Reason -> {error, Reason}
+ end.
+
+configure_bq(Config) ->
+ ok = set_channel_operation_backing_queue(Config),
+ ok = re_enable_priority_queue(Config),
+ ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config,
+ ?MODULE).
diff --git a/deps/rabbit/test/channel_operation_timeout_test_queue.erl b/deps/rabbit/test/channel_operation_timeout_test_queue.erl
new file mode 100644
index 0000000000..3190dad7a8
--- /dev/null
+++ b/deps/rabbit/test/channel_operation_timeout_test_queue.erl
@@ -0,0 +1,323 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(channel_operation_timeout_test_queue).
+
+-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
+ purge/1, purge_acks/1,
+ publish/6, publish_delivered/5,
+ batch_publish/4, batch_publish_delivered/4,
+ discard/4, drain_confirmed/1,
+ dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
+ ackfold/4, fold/3, len/1, is_empty/1, depth/1,
+ set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
+ handle_pre_hibernate/1, resume/1, msg_rates/1,
+ info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ start/2, stop/1, zip_msgs_and_acks/4, handle_info/2]).
+
+%%----------------------------------------------------------------------------
+%% This test backing queue follows the variable queue implementation, with
+%% the exception that it will introduce infinite delays on some operations if
+%% the test message has been published and is awaiting acknowledgement in the
+%% queue index. The test message is "timeout_test_msg!".
+%%
+%%----------------------------------------------------------------------------
+
+-behaviour(rabbit_backing_queue).
+
+-record(vqstate,
+ { q1,
+ q2,
+ delta,
+ q3,
+ q4,
+ next_seq_id,
+ ram_pending_ack, %% msgs using store, still in RAM
+ disk_pending_ack, %% msgs in store, paged out
+ qi_pending_ack, %% msgs using qi, *can't* be paged out
+ index_state,
+ msg_store_clients,
+ durable,
+ transient_threshold,
+ qi_embed_msgs_below,
+
+ len, %% w/o unacked
+ bytes, %% w/o unacked
+ unacked_bytes,
+ persistent_count, %% w unacked
+ persistent_bytes, %% w unacked
+ delta_transient_bytes, %%
+
+ target_ram_count,
+ ram_msg_count, %% w/o unacked
+ ram_msg_count_prev,
+ ram_ack_count_prev,
+ ram_bytes, %% w unacked
+ out_counter,
+ in_counter,
+ rates,
+ msgs_on_disk,
+ msg_indices_on_disk,
+ unconfirmed,
+ confirmed,
+ ack_out_counter,
+ ack_in_counter,
+ %% Unlike the other counters these two do not feed into
+ %% #rates{} and get reset
+ disk_read_count,
+ disk_write_count,
+
+ io_batch_size,
+
+ %% default queue or lazy queue
+ mode,
+ %% number of reduce_memory_usage executions, once it
+ %% reaches a threshold the queue will manually trigger a runtime GC
+ %% see: maybe_execute_gc/1
+ memory_reduction_run_count,
+ %% Queue data is grouped by VHost. We need to store it
+ %% to work with queue index.
+ virtual_host,
+ waiting_bump = false
+ }).
+
+-record(rates, { in, out, ack_in, ack_out, timestamp }).
+
+-record(msg_status,
+ { seq_id,
+ msg_id,
+ msg,
+ is_persistent,
+ is_delivered,
+ msg_in_store,
+ index_on_disk,
+ persist_to,
+ msg_props
+ }).
+
+-record(delta,
+ { start_seq_id, %% start_seq_id is inclusive
+ count,
+ transient,
+ end_seq_id %% end_seq_id is exclusive
+ }).
+
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-define(QUEUE, lqueue).
+-define(TIMEOUT_TEST_MSG, <<"timeout_test_msg!">>).
+
+%%----------------------------------------------------------------------------
+
+-type seq_id() :: non_neg_integer().
+
+-type rates() :: #rates { in :: float(),
+ out :: float(),
+ ack_in :: float(),
+ ack_out :: float(),
+ timestamp :: rabbit_types:timestamp()}.
+
+-type delta() :: #delta { start_seq_id :: non_neg_integer(),
+ count :: non_neg_integer(),
+ end_seq_id :: non_neg_integer() }.
+
+%% The compiler (rightfully) complains that ack() and state() are
+%% unused. For this reason we duplicate a -spec from
+%% rabbit_backing_queue with the only intent being to remove
+%% warnings. The problem here is that we can't parameterise the BQ
+%% behaviour by these two types as we would like to. We still leave
+%% these here for documentation purposes.
+-type ack() :: seq_id().
+-type state() :: #vqstate {
+ q1 :: ?QUEUE:?QUEUE(),
+ q2 :: ?QUEUE:?QUEUE(),
+ delta :: delta(),
+ q3 :: ?QUEUE:?QUEUE(),
+ q4 :: ?QUEUE:?QUEUE(),
+ next_seq_id :: seq_id(),
+ ram_pending_ack :: gb_trees:tree(),
+ disk_pending_ack :: gb_trees:tree(),
+ qi_pending_ack :: gb_trees:tree(),
+ index_state :: any(),
+ msg_store_clients :: 'undefined' | {{any(), binary()},
+ {any(), binary()}},
+ durable :: boolean(),
+ transient_threshold :: non_neg_integer(),
+ qi_embed_msgs_below :: non_neg_integer(),
+
+ len :: non_neg_integer(),
+ bytes :: non_neg_integer(),
+ unacked_bytes :: non_neg_integer(),
+
+ persistent_count :: non_neg_integer(),
+ persistent_bytes :: non_neg_integer(),
+
+ target_ram_count :: non_neg_integer() | 'infinity',
+ ram_msg_count :: non_neg_integer(),
+ ram_msg_count_prev :: non_neg_integer(),
+ ram_ack_count_prev :: non_neg_integer(),
+ ram_bytes :: non_neg_integer(),
+ out_counter :: non_neg_integer(),
+ in_counter :: non_neg_integer(),
+ rates :: rates(),
+ msgs_on_disk :: gb_sets:set(),
+ msg_indices_on_disk :: gb_sets:set(),
+ unconfirmed :: gb_sets:set(),
+ confirmed :: gb_sets:set(),
+ ack_out_counter :: non_neg_integer(),
+ ack_in_counter :: non_neg_integer(),
+ disk_read_count :: non_neg_integer(),
+ disk_write_count :: non_neg_integer(),
+
+ io_batch_size :: pos_integer(),
+ mode :: 'default' | 'lazy',
+ virtual_host :: rabbit_types:vhost() }.
+%% Duplicated from rabbit_backing_queue
+-spec ack([ack()], state()) -> {[rabbit_guid:guid()], state()}.
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+start(VHost, DurableQueues) ->
+ rabbit_variable_queue:start(VHost, DurableQueues).
+
+stop(VHost) ->
+ rabbit_variable_queue:stop(VHost).
+
+init(Queue, Recover, Callback) ->
+ rabbit_variable_queue:init(Queue, Recover, Callback).
+
+terminate(Reason, State) ->
+ rabbit_variable_queue:terminate(Reason, State).
+
+delete_and_terminate(Reason, State) ->
+ rabbit_variable_queue:delete_and_terminate(Reason, State).
+
+delete_crashed(Q) ->
+ rabbit_variable_queue:delete_crashed(Q).
+
+purge(State = #vqstate { qi_pending_ack= QPA }) ->
+ maybe_delay(QPA),
+ rabbit_variable_queue:purge(State).
+
+purge_acks(State) ->
+ rabbit_variable_queue:purge_acks(State).
+
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) ->
+ rabbit_variable_queue:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State).
+
+batch_publish(Publishes, ChPid, Flow, State) ->
+ rabbit_variable_queue:batch_publish(Publishes, ChPid, Flow, State).
+
+publish_delivered(Msg, MsgProps, ChPid, Flow, State) ->
+ rabbit_variable_queue:publish_delivered(Msg, MsgProps, ChPid, Flow, State).
+
+batch_publish_delivered(Publishes, ChPid, Flow, State) ->
+ rabbit_variable_queue:batch_publish_delivered(Publishes, ChPid, Flow, State).
+
+discard(_MsgId, _ChPid, _Flow, State) -> State.
+
+drain_confirmed(State) ->
+ rabbit_variable_queue:drain_confirmed(State).
+
+dropwhile(Pred, State) ->
+ rabbit_variable_queue:dropwhile(Pred, State).
+
+fetchwhile(Pred, Fun, Acc, State) ->
+ rabbit_variable_queue:fetchwhile(Pred, Fun, Acc, State).
+
+fetch(AckRequired, State) ->
+ rabbit_variable_queue:fetch(AckRequired, State).
+
+drop(AckRequired, State) ->
+ rabbit_variable_queue:drop(AckRequired, State).
+
+ack(List, State) ->
+ rabbit_variable_queue:ack(List, State).
+
+requeue(AckTags, #vqstate { qi_pending_ack = QPA } = State) ->
+ maybe_delay(QPA),
+ rabbit_variable_queue:requeue(AckTags, State).
+
+ackfold(MsgFun, Acc, State, AckTags) ->
+ rabbit_variable_queue:ackfold(MsgFun, Acc, State, AckTags).
+
+fold(Fun, Acc, State) ->
+ rabbit_variable_queue:fold(Fun, Acc, State).
+
+len(#vqstate { qi_pending_ack = QPA } = State) ->
+ maybe_delay(QPA),
+ rabbit_variable_queue:len(State).
+
+is_empty(State) -> 0 == len(State).
+
+depth(State) ->
+ rabbit_variable_queue:depth(State).
+
+set_ram_duration_target(DurationTarget, State) ->
+ rabbit_variable_queue:set_ram_duration_target(DurationTarget, State).
+
+ram_duration(State) ->
+ rabbit_variable_queue:ram_duration(State).
+
+needs_timeout(State) ->
+ rabbit_variable_queue:needs_timeout(State).
+
+timeout(State) ->
+ rabbit_variable_queue:timeout(State).
+
+handle_pre_hibernate(State) ->
+ rabbit_variable_queue:handle_pre_hibernate(State).
+
+handle_info(Msg, State) ->
+ rabbit_variable_queue:handle_info(Msg, State).
+
+resume(State) -> rabbit_variable_queue:resume(State).
+
+msg_rates(State) ->
+ rabbit_variable_queue:msg_rates(State).
+
+info(Info, State) ->
+ rabbit_variable_queue:info(Info, State).
+
+invoke(Module, Fun, State) -> rabbit_variable_queue:invoke(Module, Fun, State).
+
+is_duplicate(Msg, State) -> rabbit_variable_queue:is_duplicate(Msg, State).
+
+set_queue_mode(Mode, State) ->
+ rabbit_variable_queue:set_queue_mode(Mode, State).
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, State) ->
+ rabbit_variable_queue:zip_msgs_and_acks(Msgs, AckTags, Accumulator, State).
+
+%% Delay
+maybe_delay(QPA) ->
+ case is_timeout_test(gb_trees:values(QPA)) of
+ true -> receive
+ %% The queue received an EXIT message; it is most likely the
+ %% node being stopped with "rabbitmqctl stop". Abort the wait
+ %% and requeue the EXIT message.
+ {'EXIT', _, shutdown} = ExitMsg -> self() ! ExitMsg,
+ void
+ after infinity -> void
+ end;
+ _ -> void
+ end.
+
+is_timeout_test([]) -> false;
+is_timeout_test([#msg_status{
+ msg = #basic_message{
+ content = #content{
+ payload_fragments_rev = PFR}}}|Rem]) ->
+ case lists:member(?TIMEOUT_TEST_MSG, PFR) of
+ true -> true;
+ _ -> is_timeout_test(Rem)
+ end;
+is_timeout_test([_|Rem]) -> is_timeout_test(Rem).
diff --git a/deps/rabbit/test/cluster_SUITE.erl b/deps/rabbit/test/cluster_SUITE.erl
new file mode 100644
index 0000000000..9df196a8ed
--- /dev/null
+++ b/deps/rabbit/test/cluster_SUITE.erl
@@ -0,0 +1,307 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(cluster_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("include/amqqueue.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+
+-define(CLUSTER_TESTCASES, [
+ delegates_async,
+ delegates_sync,
+ queue_cleanup,
+ declare_on_dead_queue
+ ]).
+
+all() ->
+ [
+ {group, cluster_tests}
+ ].
+
+groups() ->
+ [
+ {cluster_tests, [], [
+ {from_cluster_node1, [], ?CLUSTER_TESTCASES},
+ {from_cluster_node2, [], ?CLUSTER_TESTCASES}
+ ]}
+ ].
+
+group(_) ->
+ [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun(C) -> init_per_group1(Group, C) end
+ ]);
+ false ->
+ rabbit_ct_helpers:run_steps(Config, [
+ fun(C) -> init_per_group1(Group, C) end
+ ])
+ end.
+
+init_per_group1(from_cluster_node1, Config) ->
+ rabbit_ct_helpers:set_config(Config, {test_direction, {0, 1}});
+init_per_group1(from_cluster_node2, Config) ->
+ rabbit_ct_helpers:set_config(Config, {test_direction, {1, 0}});
+init_per_group1(_, Config) ->
+ Config.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% ---------------------------------------------------------------------------
+%% Cluster-dependent tests.
+%% ---------------------------------------------------------------------------
+
+delegates_async(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, delegates_async1, [Config, To]).
+
+delegates_async1(_Config, SecondaryNode) ->
+ Self = self(),
+ Sender = fun (Pid) -> Pid ! {invoked, Self} end,
+
+ Responder = make_responder(fun ({invoked, Pid}) -> Pid ! response end),
+
+ ok = delegate:invoke_no_result(spawn(Responder), Sender),
+ ok = delegate:invoke_no_result(spawn(SecondaryNode, Responder), Sender),
+ await_response(2),
+
+ passed.
+
+delegates_sync(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, delegates_sync1, [Config, To]).
+
+delegates_sync1(_Config, SecondaryNode) ->
+ Sender = fun (Pid) -> gen_server:call(Pid, invoked, infinity) end,
+ BadSender = fun (_Pid) -> exit(exception) end,
+
+ Responder = make_responder(fun ({'$gen_call', From, invoked}) ->
+ gen_server:reply(From, response)
+ end),
+
+ BadResponder = make_responder(fun ({'$gen_call', From, invoked}) ->
+ gen_server:reply(From, response)
+ end, bad_responder_died),
+
+ response = delegate:invoke(spawn(Responder), Sender),
+ response = delegate:invoke(spawn(SecondaryNode, Responder), Sender),
+
+ must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end),
+ must_exit(fun () ->
+ delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end),
+
+ LocalGoodPids = spawn_responders(node(), Responder, 2),
+ RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2),
+ LocalBadPids = spawn_responders(node(), BadResponder, 2),
+ RemoteBadPids = spawn_responders(SecondaryNode, BadResponder, 2),
+
+ {GoodRes, []} = delegate:invoke(LocalGoodPids ++ RemoteGoodPids, Sender),
+ true = lists:all(fun ({_, response}) -> true end, GoodRes),
+ GoodResPids = [Pid || {Pid, _} <- GoodRes],
+
+ Good = lists:usort(LocalGoodPids ++ RemoteGoodPids),
+ Good = lists:usort(GoodResPids),
+
+ {[], BadRes} = delegate:invoke(LocalBadPids ++ RemoteBadPids, BadSender),
+ true = lists:all(fun ({_, {exit, exception, _}}) -> true end, BadRes),
+ BadResPids = [Pid || {Pid, _} <- BadRes],
+
+ Bad = lists:usort(LocalBadPids ++ RemoteBadPids),
+ Bad = lists:usort(BadResPids),
+
+ MagicalPids = [rabbit_misc:string_to_pid(Str) ||
+ Str <- ["<nonode@nohost.0.1.0>", "<nonode@nohost.0.2.0>"]],
+ {[], BadNodes} = delegate:invoke(MagicalPids, Sender),
+ true = lists:all(
+ fun ({_, {exit, {nodedown, nonode@nohost}, _Stack}}) -> true end,
+ BadNodes),
+ BadNodesPids = [Pid || {Pid, _} <- BadNodes],
+
+ Magical = lists:usort(MagicalPids),
+ Magical = lists:usort(BadNodesPids),
+
+ passed.
+
+queue_cleanup(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, queue_cleanup1, [Config, To]).
+
+queue_cleanup1(_Config, _SecondaryNode) ->
+ {_Writer, Ch} = test_spawn(),
+ rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }),
+ receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} ->
+ ok
+ after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+ end,
+ rabbit_channel:shutdown(Ch),
+ rabbit:stop(),
+ rabbit:start(),
+ {_Writer2, Ch2} = test_spawn(),
+ rabbit_channel:do(Ch2, #'queue.declare'{ passive = true,
+ queue = ?CLEANUP_QUEUE_NAME }),
+ receive
+ #'channel.close'{reply_code = ?NOT_FOUND} ->
+ ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_exit)
+ end,
+ rabbit_channel:shutdown(Ch2),
+ passed.
+
+declare_on_dead_queue(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, declare_on_dead_queue1, [Config, To]).
+
+declare_on_dead_queue1(_Config, SecondaryNode) ->
+ QueueName = rabbit_misc:r(<<"/">>, queue, ?CLEANUP_QUEUE_NAME),
+ Self = self(),
+ Pid = spawn(SecondaryNode,
+ fun () ->
+ {new, Q} = rabbit_amqqueue:declare(QueueName, false, false, [], none, <<"acting-user">>),
+ QueueName = ?amqqueue_field_name(Q),
+ QPid = ?amqqueue_field_pid(Q),
+ exit(QPid, kill),
+ Self ! {self(), killed, QPid}
+ end),
+ receive
+ {Pid, killed, OldPid} ->
+ Q = dead_queue_loop(QueueName, OldPid),
+ {ok, 0} = rabbit_amqqueue:delete(Q, false, false, <<"acting-user">>),
+ passed
+ after ?TIMEOUT -> throw(failed_to_create_and_kill_queue)
+ end.
+
+
+make_responder(FMsg) -> make_responder(FMsg, timeout).
+make_responder(FMsg, Throw) ->
+ fun () ->
+ receive Msg -> FMsg(Msg)
+ after ?TIMEOUT -> throw(Throw)
+ end
+ end.
+
+spawn_responders(Node, Responder, Count) ->
+ [spawn(Node, Responder) || _ <- lists:seq(1, Count)].
+
+await_response(0) ->
+ ok;
+await_response(Count) ->
+ receive
+ response -> ok,
+ await_response(Count - 1)
+ after ?TIMEOUT -> throw(timeout)
+ end.
+
+must_exit(Fun) ->
+ try
+ Fun(),
+ throw(exit_not_thrown)
+ catch
+ exit:_ -> ok
+ end.
+
+dead_queue_loop(QueueName, OldPid) ->
+ {existing, Q} = rabbit_amqqueue:declare(QueueName, false, false, [], none, <<"acting-user">>),
+ QPid = ?amqqueue_field_pid(Q),
+ case QPid of
+ OldPid -> timer:sleep(25),
+ dead_queue_loop(QueueName, OldPid);
+ _ -> true = rabbit_misc:is_process_alive(QPid),
+ Q
+ end.
+
+
+test_spawn() ->
+ {Writer, _Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(),
+ ok = rabbit_channel:do(Ch, #'channel.open'{}),
+ receive #'channel.open_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
+ end,
+ {Writer, Ch}.
+
+test_spawn(Node) ->
+ rpc:call(Node, ?MODULE, test_spawn_remote, []).
+
+%% Spawn an arbitrary long-lived process, so that we don't end up linking
+%% the channel to the short-lived process (the RPC call, here) spun up by
+%% the RPC server.
+test_spawn_remote() ->
+ RPC = self(),
+ spawn(fun () ->
+ {Writer, Ch} = test_spawn(),
+ RPC ! {Writer, Ch},
+ link(Ch),
+ receive
+ _ -> ok
+ end
+ end),
+ receive Res -> Res
+ after ?TIMEOUT -> throw(failed_to_receive_result)
+ end.
+
+queue_name(Config, Name) ->
+ Name1 = iolist_to_binary(rabbit_ct_helpers:config_to_testcase_name(Config, Name)),
+ queue_name(Name1).
+
+queue_name(Name) ->
+ rabbit_misc:r(<<"/">>, queue, Name).
diff --git a/deps/rabbit/test/cluster_rename_SUITE.erl b/deps/rabbit/test/cluster_rename_SUITE.erl
new file mode 100644
index 0000000000..cdf02c9643
--- /dev/null
+++ b/deps/rabbit/test/cluster_rename_SUITE.erl
@@ -0,0 +1,301 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(cluster_rename_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ % XXX post_change_nodename,
+ abortive_rename,
+ rename_fail,
+ rename_twice_fail
+ ]},
+ {cluster_size_3, [], [
+ rename_cluster_one_by_one,
+ rename_cluster_big_bang,
+ partial_one_by_one,
+ partial_big_bang
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 15}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2} %% Replaced with a list of node names later.
+ ]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3} %% Replaced with a list of node names later.
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Nodenames = [
+ list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+ || I <- lists:seq(1, ClusterSize)
+ ],
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, Nodenames},
+ {rmq_nodes_clustered, true}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = case rabbit_ct_helpers:get_config(Config, save_config) of
+ undefined -> Config;
+ C -> C
+ end,
+ Config2 = rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config2, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Rolling rename of a cluster; each node should do a secondary rename.
+rename_cluster_one_by_one(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ Config1 = stop_rename_start(Config, Node1, [Node1, jessica]),
+ Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]),
+ Config3 = stop_rename_start(Config2, Node3, [Node3, flopsy]),
+
+ [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(
+ Config3, nodename),
+ consume_all(Config3,
+ [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+ {save_config, Config3}.
+
+%% Big-bang rename of a cluster; Node1 should do a primary rename.
+rename_cluster_big_bang(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node3),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node2),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+
+ Map = [Node1, jessica, Node2, hazel, Node3, flopsy],
+ Config1 = rename_node(Config, Node1, Map),
+ Config2 = rename_node(Config1, Node2, Map),
+ Config3 = rename_node(Config2, Node3, Map),
+
+ [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(
+ Config3, nodename),
+ ok = rabbit_ct_broker_helpers:start_node(Config3, Jessica),
+ ok = rabbit_ct_broker_helpers:start_node(Config3, Hazel),
+ ok = rabbit_ct_broker_helpers:start_node(Config3, Flopsy),
+
+ consume_all(Config3,
+ [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+ {save_config, Config3}.
+
+%% Here we test that Node1 copes with things being renamed around it.
+partial_one_by_one(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ Config1 = stop_rename_start(Config, Node1, [Node1, jessica]),
+ Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]),
+
+ [Jessica, Hazel, Node3] = rabbit_ct_broker_helpers:get_node_configs(
+ Config2, nodename),
+ consume_all(Config2,
+ [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Node3, <<"3">>}]),
+ {save_config, Config2}.
+
+%% Here we test that Node1 copes with things being renamed around it.
+partial_big_bang(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node3),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node2),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+
+ Map = [Node2, hazel, Node3, flopsy],
+ Config1 = rename_node(Config, Node2, Map),
+ Config2 = rename_node(Config1, Node3, Map),
+
+ [Node1, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(Config2,
+ nodename),
+ ok = rabbit_ct_broker_helpers:start_node(Config2, Node1),
+ ok = rabbit_ct_broker_helpers:start_node(Config2, Hazel),
+ ok = rabbit_ct_broker_helpers:start_node(Config2, Flopsy),
+
+ consume_all(Config2,
+ [{Node1, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+ {save_config, Config2}.
+
+% XXX %% We should be able to specify the -n parameter on ctl with either
+% XXX %% the before or after name for the local node (since in real cases
+% XXX %% one might want to invoke the command before or after the hostname
+% XXX %% has changed) - usually we test before so here we test after.
+% XXX post_change_nodename([Node1, _Bigwig]) ->
+% XXX publish(Node1, <<"Node1">>),
+% XXX
+% XXX Bugs1 = rabbit_test_configs:stop_node(Node1),
+% XXX Bugs2 = [{nodename, jessica} | proplists:delete(nodename, Bugs1)],
+% XXX Jessica0 = rename_node(Bugs2, jessica, [Node1, jessica]),
+% XXX Jessica = rabbit_test_configs:start_node(Jessica0),
+% XXX
+% XXX consume(Jessica, <<"Node1">>),
+% XXX stop_all([Jessica]),
+% XXX ok.
+
+%% If we invoke rename but the node name does not actually change, we
+%% should roll back.
+abortive_rename(Config) ->
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ publish(Config, Node1, <<"Node1">>),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+ _Config1 = rename_node(Config, Node1, [Node1, jessica]),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Node1),
+
+ consume(Config, Node1, <<"Node1">>),
+ ok.
+
+%% And test some ways the command can fail.
+rename_fail(Config) ->
+ [Node1, Node2] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+ %% Rename from a node that does not exist
+ ok = rename_node_fail(Config, Node1, [bugzilla, jessica]),
+ %% Rename to a node which does
+ ok = rename_node_fail(Config, Node1, [Node1, Node2]),
+ %% Rename two nodes to the same thing
+ ok = rename_node_fail(Config, Node1, [Node1, jessica, Node2, jessica]),
+ %% Rename while impersonating a node not in the cluster
+ Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Node1,
+ {nodename, 'rabbit@localhost'}),
+ ok = rename_node_fail(Config1, Node1, [Node1, jessica]),
+ ok.
+
+rename_twice_fail(Config) ->
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+ Config1 = rename_node(Config, Node1, [Node1, indecisive]),
+ ok = rename_node_fail(Config, Node1, [indecisive, jessica]),
+ {save_config, Config1}.
+
+%% ----------------------------------------------------------------------------
+
+stop_rename_start(Config, Nodename, Map) ->
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Nodename),
+ Config1 = rename_node(Config, Nodename, Map),
+ ok = rabbit_ct_broker_helpers:start_node(Config1, Nodename),
+ Config1.
+
+rename_node(Config, Nodename, Map) ->
+ {ok, Config1} = do_rename_node(Config, Nodename, Map),
+ Config1.
+
+rename_node_fail(Config, Nodename, Map) ->
+ {error, _, _} = do_rename_node(Config, Nodename, Map),
+ ok.
+
+do_rename_node(Config, Nodename, Map) ->
+ Map1 = [
+ begin
+ NStr = atom_to_list(N),
+ case lists:member($@, NStr) of
+ true -> N;
+ false -> rabbit_nodes:make({NStr, "localhost"})
+ end
+ end
+ || N <- Map
+ ],
+ Ret = rabbit_ct_broker_helpers:rabbitmqctl(Config, Nodename,
+ ["rename_cluster_node" | Map1], 120000),
+ case Ret of
+ {ok, _} ->
+ Config1 = update_config_after_rename(Config, Map1),
+ {ok, Config1};
+ {error, _, _} = Error ->
+ Error
+ end.
+
+update_config_after_rename(Config, [Old, New | Rest]) ->
+ Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Old,
+ {nodename, New}),
+ update_config_after_rename(Config1, Rest);
+update_config_after_rename(Config, []) ->
+ Config.
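+
+%% Note: the rename map is a flat list of alternating old/new names,
+%% consumed pairwise by update_config_after_rename/2 above. For example
+%% (hypothetical node names):
+%%
+%%   do_rename_node(Config, Node1, [Node1, jessica, Node2, hazel])
+%%
+%% renames Node1 -> jessica and Node2 -> hazel in one "rename_cluster_node"
+%% invocation and updates the test node config accordingly.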
+
+publish(Config, Node, Q) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Q}),
+ amqp_channel:wait_for_confirms(Ch),
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, Node).
+
+consume(Config, Node, Q) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Q}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = Q}),
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, Node).
+
+
+publish_all(Config, Nodes) ->
+ [publish(Config, Node, Key) || {Node, Key} <- Nodes].
+
+consume_all(Config, Nodes) ->
+ [consume(Config, Node, Key) || {Node, Key} <- Nodes].
+
+set_node(Nodename, Cfg) ->
+ [{nodename, Nodename} | proplists:delete(nodename, Cfg)].
diff --git a/deps/rabbit/test/clustering_management_SUITE.erl b/deps/rabbit/test/clustering_management_SUITE.erl
new file mode 100644
index 0000000000..550a30b511
--- /dev/null
+++ b/deps/rabbit/test/clustering_management_SUITE.erl
@@ -0,0 +1,861 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(clustering_management_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(LOOP_RECURSION_DELAY, 100).
+
+all() ->
+ [
+ {group, unclustered_2_nodes},
+ {group, unclustered_3_nodes},
+ {group, clustered_2_nodes},
+ {group, clustered_4_nodes}
+ ].
+
+groups() ->
+ [
+ {unclustered_2_nodes, [], [
+ {cluster_size_2, [], [
+ classic_config_discovery_node_list
+ ]}
+ ]},
+ {unclustered_3_nodes, [], [
+ {cluster_size_3, [], [
+ join_and_part_cluster,
+ join_cluster_bad_operations,
+ join_to_start_interval,
+ forget_cluster_node,
+ change_cluster_node_type,
+ change_cluster_when_node_offline,
+ update_cluster_nodes,
+ force_reset_node
+ ]}
+ ]},
+ {clustered_2_nodes, [], [
+ {cluster_size_2, [], [
+ forget_removes_things,
+ reset_removes_things,
+ forget_offline_removes_things,
+ force_boot,
+ status_with_alarm,
+ pid_file_and_await_node_startup,
+ await_running_count,
+ start_with_invalid_schema_in_path,
+ persistent_cluster_id
+ ]}
+ ]},
+ {clustered_4_nodes, [], [
+ {cluster_size_4, [], [
+ forget_promotes_offline_follower
+ ]}
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 15}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:merge_app_env(
+ Config, {rabbit, [
+ {mnesia_table_loading_retry_limit, 2},
+ {mnesia_table_loading_retry_timeout,1000}
+ ]}),
+ rabbit_ct_helpers:run_setup_steps(Config1).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(unclustered_2_nodes, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(unclustered_3_nodes, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(clustered_2_nodes, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(clustered_4_nodes, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]);
+init_per_group(cluster_size_4, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 4}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}},
+ {keep_pid_file_on_exit, true}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+start_with_invalid_schema_in_path(Config) ->
+ [Rabbit, Hare] = cluster_members(Config),
+ stop_app(Rabbit),
+ stop_app(Hare),
+
+ create_bad_schema(Rabbit, Hare, Config),
+
+ start_app(Hare),
+ case start_app(Rabbit) of
+ ok -> ok;
+ ErrRabbit -> error({unable_to_start_with_bad_schema_in_work_dir, ErrRabbit})
+ end.
+
+persistent_cluster_id(Config) ->
+ case os:getenv("SECONDARY_UMBRELLA") of
+ false ->
+ [Rabbit, Hare] = cluster_members(Config),
+ ClusterIDA1 = rpc:call(Rabbit, rabbit_nodes, persistent_cluster_id, []),
+ ClusterIDB1 = rpc:call(Hare, rabbit_nodes, persistent_cluster_id, []),
+ ?assertEqual(ClusterIDA1, ClusterIDB1),
+
+ rabbit_ct_broker_helpers:restart_node(Config, Rabbit),
+ ClusterIDA2 = rpc:call(Rabbit, rabbit_nodes, persistent_cluster_id, []),
+ rabbit_ct_broker_helpers:restart_node(Config, Hare),
+ ClusterIDB2 = rpc:call(Hare, rabbit_nodes, persistent_cluster_id, []),
+ ?assertEqual(ClusterIDA1, ClusterIDA2),
+ ?assertEqual(ClusterIDA2, ClusterIDB2);
+ _ ->
+ %% skip the test in mixed version mode
+ {skip, "Should not run in mixed version environments"}
+ end.
+
+create_bad_schema(Rabbit, Hare, Config) ->
+ {ok, RabbitMnesiaDir} = rpc:call(Rabbit, application, get_env, [mnesia, dir]),
+ {ok, HareMnesiaDir} = rpc:call(Hare, application, get_env, [mnesia, dir]),
+ %% Make sure we don't use the current dir:
+ PrivDir = ?config(priv_dir, Config),
+ ct:pal("Priv dir ~p~n", [PrivDir]),
+ ok = filelib:ensure_dir(filename:join(PrivDir, "file")),
+
+ ok = rpc:call(Rabbit, file, set_cwd, [PrivDir]),
+ ok = rpc:call(Hare, file, set_cwd, [PrivDir]),
+
+ ok = rpc:call(Rabbit, application, unset_env, [mnesia, dir]),
+ ok = rpc:call(Hare, application, unset_env, [mnesia, dir]),
+ ok = rpc:call(Rabbit, mnesia, create_schema, [[Rabbit, Hare]]),
+ ok = rpc:call(Rabbit, mnesia, start, []),
+ {atomic,ok} = rpc:call(Rabbit, mnesia, create_table,
+ [rabbit_queue, [{ram_copies, [Rabbit, Hare]}]]),
+ stopped = rpc:call(Rabbit, mnesia, stop, []),
+ ok = rpc:call(Rabbit, application, set_env, [mnesia, dir, RabbitMnesiaDir]),
+ ok = rpc:call(Hare, application, set_env, [mnesia, dir, HareMnesiaDir]).
+
+join_and_part_cluster(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+ assert_not_clustered(Rabbit),
+ assert_not_clustered(Hare),
+ assert_not_clustered(Bunny),
+
+ stop_join_start(Rabbit, Bunny),
+ assert_clustered([Rabbit, Bunny]),
+
+ stop_join_start(Hare, Bunny, true),
+ assert_cluster_status(
+ {[Bunny, Hare, Rabbit], [Bunny, Rabbit], [Bunny, Hare, Rabbit]},
+ [Rabbit, Hare, Bunny]),
+
+ %% Allow clustering with already clustered node
+ ok = stop_app(Rabbit),
+ {ok, <<"The node is already a member of this cluster">>} =
+ join_cluster(Rabbit, Hare),
+ ok = start_app(Rabbit),
+
+ stop_reset_start(Rabbit),
+ assert_not_clustered(Rabbit),
+ assert_cluster_status({[Bunny, Hare], [Bunny], [Bunny, Hare]},
+ [Hare, Bunny]),
+
+ stop_reset_start(Hare),
+ assert_not_clustered(Hare),
+ assert_not_clustered(Bunny).
+
+join_cluster_bad_operations(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ UsePrelaunch = rabbit_ct_broker_helpers:rpc(
+ Config, Hare,
+ erlang, function_exported,
+ [rabbit_prelaunch, get_context, 0]),
+
+ %% Nonexistent node
+ ok = stop_app(Rabbit),
+ assert_failure(fun () -> join_cluster(Rabbit, non@existent) end),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Rabbit),
+
+ %% Trying to cluster with mnesia running
+ assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
+ assert_not_clustered(Rabbit),
+
+ %% Trying to cluster the node with itself
+ ok = stop_app(Rabbit),
+ assert_failure(fun () -> join_cluster(Rabbit, Rabbit) end),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Rabbit),
+
+ %% Do not let the node leave the cluster or reset if it's the only
+ %% disc node in the cluster
+ stop_join_start(Hare, Rabbit, true),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+ ok = stop_app(Hare),
+ assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
+ assert_failure(fun () -> reset(Rabbit) end),
+ ok = start_app(Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+
+ %% Cannot start RAM-only node first
+ ok = stop_app(Rabbit),
+ ok = stop_app(Hare),
+ assert_failure(fun () -> start_app(Hare) end),
+ ok = start_app(Rabbit),
+ case UsePrelaunch of
+ true ->
+ ok = start_app(Hare);
+ false ->
+ %% The Erlang VM has stopped after previous rabbit app failure
+ ok = rabbit_ct_broker_helpers:start_node(Config, Hare)
+ end,
+ ok.
+
+%% This tests that the nodes in the cluster are notified immediately of a node
+%% join, and not just after the app is started.
+join_to_start_interval(Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+ ok = stop_app(Rabbit),
+ ok = join_cluster(Rabbit, Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit, Hare]),
+ ok = start_app(Rabbit),
+ assert_clustered([Rabbit, Hare]).
+
+forget_cluster_node(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ %% Trying to remove a node not in the cluster should fail
+ assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),
+
+ stop_join_start(Rabbit, Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ %% Trying to remove an online node should fail
+ assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),
+
+ ok = stop_app(Rabbit),
+ %% We're passing the --offline flag, but Hare is online
+ assert_failure(fun () -> forget_cluster_node(Hare, Rabbit, true) end),
+ %% Removing some nonexistent node will fail
+ assert_failure(fun () -> forget_cluster_node(Hare, non@existent) end),
+ ok = forget_cluster_node(Hare, Rabbit),
+ assert_not_clustered(Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit]),
+
+ %% Now we can't start Rabbit since it thinks that it's still in the cluster
+ %% with Hare, while Hare disagrees.
+ assert_failure(fun () -> start_app(Rabbit) end),
+
+ ok = reset(Rabbit),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Rabbit),
+
+ %% Now we remove Rabbit from an offline node.
+ stop_join_start(Bunny, Hare),
+ stop_join_start(Rabbit, Hare),
+ assert_clustered([Rabbit, Hare, Bunny]),
+ ok = stop_app(Hare),
+ ok = stop_app(Rabbit),
+ ok = stop_app(Bunny),
+ %% This is fine but we need the flag
+ assert_failure(fun () -> forget_cluster_node(Hare, Bunny) end),
+ %% Also fails because hare node is still running
+ assert_failure(fun () -> forget_cluster_node(Hare, Bunny, true) end),
+ %% But this works
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
+ ["forget_cluster_node", "--offline", Bunny]),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Hare),
+ ok = start_app(Rabbit),
+ %% Bunny still thinks its clustered with Rabbit and Hare
+ assert_failure(fun () -> start_app(Bunny) end),
+ ok = reset(Bunny),
+ ok = start_app(Bunny),
+ assert_not_clustered(Bunny),
+ assert_clustered([Rabbit, Hare]).
+
+forget_removes_things(Config) ->
+ test_removes_things(Config, fun (R, H) -> ok = forget_cluster_node(H, R) end).
+
+reset_removes_things(Config) ->
+ test_removes_things(Config, fun (R, _H) -> ok = reset(R) end).
+
+test_removes_things(Config, LoseRabbit) ->
+ Unmirrored = <<"unmirrored-queue">>,
+ [Rabbit, Hare] = cluster_members(Config),
+ RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ declare(RCh, Unmirrored),
+ ok = stop_app(Rabbit),
+
+ HCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+ (catch declare(HCh, Unmirrored)),
+
+ ok = LoseRabbit(Rabbit, Hare),
+ HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ declare(HCh2, Unmirrored),
+ ok.
+
+forget_offline_removes_things(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Unmirrored = <<"unmirrored-queue">>,
+ X = <<"X">>,
+ RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ declare(RCh, Unmirrored),
+
+ amqp_channel:call(RCh, #'exchange.declare'{durable = true,
+ exchange = X,
+ auto_delete = true}),
+ amqp_channel:call(RCh, #'queue.bind'{queue = Unmirrored,
+ exchange = X}),
+ ok = rabbit_ct_broker_helpers:stop_broker(Config, Rabbit),
+
+ HCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+ (catch declare(HCh, Unmirrored)),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
+ ["forget_cluster_node", "--offline", Rabbit]),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Hare),
+
+ HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ declare(HCh2, Unmirrored),
+ {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+ (catch amqp_channel:call(HCh2,#'exchange.declare'{durable = true,
+ exchange = X,
+ auto_delete = true,
+ passive = true})),
+ ok.
+
+forget_promotes_offline_follower(Config) ->
+ [A, B, C, D] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = <<"mirrored-queue">>,
+ declare(ACh, QName),
+ set_ha_policy(Config, QName, A, [B, C]),
+ set_ha_policy(Config, QName, A, [C, D]), %% Test add and remove from recoverable_mirrors
+
+ %% Publish and confirm
+ amqp_channel:call(ACh, #'confirm.select'{}),
+ amqp_channel:cast(ACh, #'basic.publish'{routing_key = QName},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
+ amqp_channel:wait_for_confirms(ACh),
+
+ %% We kill nodes rather than stop them in order to make sure
+ %% that we aren't dependent on anything that happens as they shut
+ %% down (see bug 26467).
+ ok = rabbit_ct_broker_helpers:kill_node(Config, D),
+ ok = rabbit_ct_broker_helpers:kill_node(Config, C),
+ ok = rabbit_ct_broker_helpers:kill_node(Config, B),
+ ok = rabbit_ct_broker_helpers:kill_node(Config, A),
+
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, C,
+ ["force_boot"]),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, C),
+
+ %% We should now have the following dramatis personae:
+ %% A - down, master
+ %% B - down, used to be mirror, no longer is, never had the message
+ %% C - running, should be mirror, but has wiped the message on restart
+ %% D - down, recoverable mirror, contains message
+ %%
+ %% So forgetting A should offline-promote the queue to D, keeping
+ %% the message.
+
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, C,
+ ["forget_cluster_node", A]),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, D),
+ DCh2 = rabbit_ct_client_helpers:open_channel(Config, D),
+ #'queue.declare_ok'{message_count = 1} = declare(DCh2, QName),
+ ok.
+
+set_ha_policy(Config, QName, Master, Slaves) ->
+ Nodes = [list_to_binary(atom_to_list(N)) || N <- [Master | Slaves]],
+ HaPolicy = {<<"nodes">>, Nodes},
+ rabbit_ct_broker_helpers:set_ha_policy(Config, Master, QName, HaPolicy),
+ await_followers(QName, Master, Slaves).
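+
+%% For reference, this helper is roughly equivalent to setting a classic
+%% mirrored queue policy by hand (hypothetical node names):
+%%
+%%   rabbitmqctl set_policy ha-nodes "^mirrored-queue$" \
+%%       '{"ha-mode":"nodes","ha-params":["rabbit@host2","rabbit@host3"]}'
+%%
+%% i.e. the queue is mirrored to the explicitly listed nodes, and we then
+%% wait for the mirrors to appear before continuing.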
+
+await_followers(QName, Master, Slaves) ->
+ await_followers_0(QName, Master, Slaves, 10).
+
+await_followers_0(QName, Master, Slaves0, Tries) ->
+ {ok, Queue} = await_followers_lookup_queue(QName, Master),
+ SPids = amqqueue:get_slave_pids(Queue),
+ ActMaster = amqqueue:qnode(Queue),
+ ActSlaves = lists:usort([node(P) || P <- SPids]),
+ Slaves1 = lists:usort(Slaves0),
+ await_followers_1(QName, ActMaster, ActSlaves, Master, Slaves1, Tries).
+
+await_followers_1(QName, _ActMaster, _ActSlaves, _Master, _Slaves, 0) ->
+ error({timeout_waiting_for_followers, QName});
+await_followers_1(QName, ActMaster, ActSlaves, Master, Slaves, Tries) ->
+ case {Master, Slaves} of
+ {ActMaster, ActSlaves} ->
+ ok;
+ _ ->
+ timer:sleep(250),
+ await_followers_0(QName, Master, Slaves, Tries - 1)
+ end.
+
+await_followers_lookup_queue(QName, Master) ->
+ await_followers_lookup_queue(QName, Master, 10).
+
+await_followers_lookup_queue(QName, _Master, 0) ->
+ error({timeout_looking_up_queue, QName});
+await_followers_lookup_queue(QName, Master, Tries) ->
+ RpcArgs = [rabbit_misc:r(<<"/">>, queue, QName)],
+ case rpc:call(Master, rabbit_amqqueue, lookup, RpcArgs) of
+ {error, not_found} ->
+ timer:sleep(250),
+ await_followers_lookup_queue(QName, Master, Tries - 1);
+ {ok, Q} ->
+ {ok, Q}
+ end.
+
+force_boot(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ {error, _, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["force_boot"]),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+ {error, _} = rabbit_ct_broker_helpers:start_node(Config, Rabbit),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["force_boot"]),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Rabbit),
+ ok.
+
+change_cluster_node_type(Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+ %% Trying to change the node to the ram type when not clustered should always fail
+ ok = stop_app(Rabbit),
+ assert_failure(fun () -> change_cluster_node_type(Rabbit, ram) end),
+ ok = start_app(Rabbit),
+
+ ok = stop_app(Rabbit),
+ join_cluster(Rabbit, Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit, Hare]),
+ change_cluster_node_type(Rabbit, ram),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]},
+ [Rabbit, Hare]),
+ change_cluster_node_type(Rabbit, disc),
+
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit, Hare]),
+ change_cluster_node_type(Rabbit, ram),
+ ok = start_app(Rabbit),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Hare, Rabbit]},
+ [Rabbit, Hare]),
+
+ %% Changing to ram when you're the only ram node should fail
+ ok = stop_app(Hare),
+ assert_failure(fun () -> change_cluster_node_type(Hare, ram) end),
+ ok = start_app(Hare).
+
+change_cluster_when_node_offline(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ %% Cluster the three nodes
+ stop_join_start(Rabbit, Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ stop_join_start(Bunny, Hare),
+ assert_clustered([Rabbit, Hare, Bunny]),
+
+ %% Bring down Rabbit, and remove Bunny from the cluster while
+ %% Rabbit is offline
+ ok = stop_app(Rabbit),
+ ok = stop_app(Bunny),
+ ok = reset(Bunny),
+ assert_cluster_status({[Bunny], [Bunny], []}, [Bunny]),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Hare]),
+ assert_cluster_status(
+ {[Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny], [Hare, Bunny]}, [Rabbit]),
+
+ %% Bring Rabbit back up
+ ok = start_app(Rabbit),
+ assert_clustered([Rabbit, Hare]),
+ ok = start_app(Bunny),
+ assert_not_clustered(Bunny),
+
+ %% Now the same, but Rabbit is a RAM node, and we bring up Bunny
+ %% before
+ ok = stop_app(Rabbit),
+ ok = change_cluster_node_type(Rabbit, ram),
+ ok = start_app(Rabbit),
+ stop_join_start(Bunny, Hare),
+ assert_cluster_status(
+ {[Rabbit, Hare, Bunny], [Hare, Bunny], [Rabbit, Hare, Bunny]},
+ [Rabbit, Hare, Bunny]),
+ ok = stop_app(Rabbit),
+ ok = stop_app(Bunny),
+ ok = reset(Bunny),
+ ok = start_app(Bunny),
+ assert_not_clustered(Bunny),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]}, [Hare]),
+ assert_cluster_status(
+ {[Rabbit, Hare, Bunny], [Hare, Bunny], [Hare, Bunny]},
+ [Rabbit]),
+ ok = start_app(Rabbit),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+ assert_not_clustered(Bunny).
+
+update_cluster_nodes(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ %% Mnesia is running...
+ assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
+
+ ok = stop_app(Rabbit),
+ ok = join_cluster(Rabbit, Hare),
+ ok = stop_app(Bunny),
+ ok = join_cluster(Bunny, Hare),
+ ok = start_app(Bunny),
+ stop_reset_start(Hare),
+ assert_failure(fun () -> start_app(Rabbit) end),
+ %% Bogus node
+ assert_failure(fun () -> update_cluster_nodes(Rabbit, non@existent) end),
+ %% Inconsistent node
+ assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
+ ok = update_cluster_nodes(Rabbit, Bunny),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Hare),
+ assert_clustered([Rabbit, Bunny]).
+
+classic_config_discovery_node_list(Config) ->
+ [Rabbit, Hare] = cluster_members(Config),
+
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, {[Rabbit], disc}]),
+ ok = start_app(Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, {[Rabbit], ram}]),
+ ok = start_app(Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+
+ %% A list of nodes [node()] is equivalent to {[node()], disc}
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, [Rabbit]]),
+ ok = start_app(Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ %% If we use an invalid cluster_nodes conf, the node fails to start.
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, "Yes, please"]),
+ assert_failure(fun () -> start_app(Hare) end),
+ assert_not_clustered(Rabbit).
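+
+%% For reference, the classic config peer discovery values exercised above
+%% correspond to settings like the following (sketch of an advanced.config
+%% entry with hypothetical node names):
+%%
+%%   [{rabbit, [{cluster_nodes, {['rabbit@host1', 'rabbit@host2'], disc}}]}].
+%%
+%% A bare list of nodes is treated as {Nodes, disc}, and the node type can
+%% be either disc or ram.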
+
+force_reset_node(Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+ stop_join_start(Rabbit, Hare),
+ stop_app(Rabbit),
+ force_reset(Rabbit),
+ %% Hare thinks that Rabbit is still clustered
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Hare]),
+ %% ...but it isn't
+ assert_cluster_status({[Rabbit], [Rabbit], []}, [Rabbit]),
+ %% We can rejoin Rabbit and Hare
+ update_cluster_nodes(Rabbit, Hare),
+ start_app(Rabbit),
+ assert_clustered([Rabbit, Hare]).
+
+status_with_alarm(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+
+ %% Given: an alarm is raised on each node.
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["set_vm_memory_high_watermark", "0.000000001"]),
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
+ ["set_disk_free_limit", "2048G"]),
+
+ %% When: we ask for alarm status
+ S = rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_alarm, get_alarms, []),
+ R = rabbit_ct_broker_helpers:rpc(Config, Hare,
+ rabbit_alarm, get_alarms, []),
+
+ %% Then: both nodes report alarm information for each other.
+ ok = alarm_information_on_each_node(S, Rabbit, Hare),
+ ok = alarm_information_on_each_node(R, Rabbit, Hare).
+
+alarm_information_on_each_node(Result, Rabbit, Hare) ->
+ %% Example result:
+ %% [{{resource_limit,disk,'rmq-ct-status_with_alarm-2-24240@localhost'},
+ %% []},
+ %% {{resource_limit,memory,'rmq-ct-status_with_alarm-1-24120@localhost'},
+ %% []}]
+ Alarms = [A || {A, _} <- Result],
+ ?assert(lists:member({resource_limit, memory, Rabbit}, Alarms)),
+ ?assert(lists:member({resource_limit, disk, Hare}, Alarms)),
+
+ ok.
+
+pid_file_and_await_node_startup(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ RabbitConfig = rabbit_ct_broker_helpers:get_node_config(Config,Rabbit),
+ RabbitPidFile = ?config(pid_file, RabbitConfig),
+ %% ensure pid file is readable
+ {ok, _} = file:read_file(RabbitPidFile),
+ %% ensure wait works on running node
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["wait", RabbitPidFile]),
+ %% stop both nodes
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+ %% starting first node fails - it was not the last node to stop
+ {error, _} = rabbit_ct_broker_helpers:start_node(Config, Rabbit),
+ PreviousPid = pid_from_file(RabbitPidFile),
+ %% start first node in the background
+ spawn_link(fun() ->
+ rabbit_ct_broker_helpers:start_node(Config, Rabbit)
+ end),
+ Attempts = 200,
+ Timeout = 50,
+ wait_for_pid_file_to_change(RabbitPidFile, PreviousPid, Attempts, Timeout),
+ {error, _, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["wait", RabbitPidFile]).
+
+await_running_count(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ RabbitConfig = rabbit_ct_broker_helpers:get_node_config(Config,Rabbit),
+ RabbitPidFile = ?config(pid_file, RabbitConfig),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["wait", RabbitPidFile]),
+ %% stop both nodes
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
+ %% start one node in the background
+ rabbit_ct_broker_helpers:start_node(Config, Rabbit),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["wait", RabbitPidFile]),
+ ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [1, 30000])),
+ ?assertEqual({error, timeout},
+ rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [2, 1000])),
+ ?assertEqual({error, timeout},
+ rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [5, 1000])),
+ rabbit_ct_broker_helpers:start_node(Config, Hare),
+ %% this now succeeds
+ ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [2, 30000])),
+ %% this still succeeds
+ ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [1, 30000])),
+ %% this still fails
+ ?assertEqual({error, timeout},
+ rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [5, 1000])).
+
+%% ----------------------------------------------------------------------------
+%% Internal utils
+%% ----------------------------------------------------------------------------
+
+wait_for_pid_file_to_change(_, _, 0, _) ->
+ error(timeout_waiting_for_pid_file_to_have_running_pid);
+wait_for_pid_file_to_change(PidFile, PreviousPid, Attempts, Timeout) ->
+ Pid = pid_from_file(PidFile),
+ case Pid =/= undefined andalso Pid =/= PreviousPid of
+ true -> ok;
+ false ->
+ ct:sleep(Timeout),
+ wait_for_pid_file_to_change(PidFile,
+ PreviousPid,
+ Attempts - 1,
+ Timeout)
+ end.
+
+pid_from_file(PidFile) ->
+ case file:read_file(PidFile) of
+ {ok, Content} ->
+ string:strip(binary_to_list(Content), both, $\n);
+ {error, enoent} ->
+ undefined
+ end.
+
+cluster_members(Config) ->
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename).
+
+assert_cluster_status(Status0, Nodes) ->
+ Status = {AllNodes, _, _} = sort_cluster_status(Status0),
+ wait_for_cluster_status(Status, AllNodes, Nodes).
+
+wait_for_cluster_status(Status, AllNodes, Nodes) ->
+ Max = 10000 / ?LOOP_RECURSION_DELAY,
+ wait_for_cluster_status(0, Max, Status, AllNodes, Nodes).
+
+wait_for_cluster_status(N, Max, Status, _AllNodes, Nodes) when N >= Max ->
+ erlang:error({cluster_status_max_tries_failed,
+ [{nodes, Nodes},
+ {expected_status, Status},
+ {max_tried, Max}]});
+wait_for_cluster_status(N, Max, Status, AllNodes, Nodes) ->
+ case lists:all(fun (Node) ->
+ verify_status_equal(Node, Status, AllNodes)
+ end, Nodes) of
+ true -> ok;
+ false -> timer:sleep(?LOOP_RECURSION_DELAY),
+ wait_for_cluster_status(N + 1, Max, Status, AllNodes, Nodes)
+ end.
+
+verify_status_equal(Node, Status, AllNodes) ->
+ NodeStatus = sort_cluster_status(cluster_status(Node)),
+ (AllNodes =/= [Node]) =:= rpc:call(Node, rabbit_mnesia, is_clustered, [])
+ andalso NodeStatus =:= Status.
+
+cluster_status(Node) ->
+ {rpc:call(Node, rabbit_mnesia, cluster_nodes, [all]),
+ rpc:call(Node, rabbit_mnesia, cluster_nodes, [disc]),
+ rpc:call(Node, rabbit_mnesia, cluster_nodes, [running])}.
+
+sort_cluster_status({All, Disc, Running}) ->
+ {lists:sort(All), lists:sort(Disc), lists:sort(Running)}.
+
+assert_clustered(Nodes) ->
+ assert_cluster_status({Nodes, Nodes, Nodes}, Nodes).
+
+assert_not_clustered(Node) ->
+ assert_cluster_status({[Node], [Node], [Node]}, [Node]).
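+
+%% The status triple asserted above is {AllNodes, DiscNodes, RunningNodes},
+%% as returned by cluster_status/1. For example, for two nodes A and B where
+%% only A is a disc node and both are running:
+%%
+%%   assert_cluster_status({[A, B], [A], [A, B]}, [A, B])
+%%
+%% checks that both A and B report exactly that view of the cluster.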
+
+assert_failure(Fun) ->
+ case catch Fun() of
+ {error, _Code, Reason} -> Reason;
+ {error, Reason} -> Reason;
+ {error_string, Reason} -> Reason;
+ {badrpc, {'EXIT', Reason}} -> Reason;
+ %% Failure to start an app results in node shutdown
+ {badrpc, nodedown} -> nodedown;
+ {badrpc_multi, Reason, _Nodes} -> Reason;
+ Other -> error({expected_failure, Other})
+ end.
+
+stop_app(Node) ->
+ rabbit_control_helper:command(stop_app, Node).
+
+start_app(Node) ->
+ rabbit_control_helper:command(start_app, Node).
+
+join_cluster(Node, To) ->
+ join_cluster(Node, To, false).
+
+join_cluster(Node, To, Ram) ->
+ rabbit_control_helper:command_with_output(join_cluster, Node, [atom_to_list(To)], [{"--ram", Ram}]).
+
+reset(Node) ->
+ rabbit_control_helper:command(reset, Node).
+
+force_reset(Node) ->
+ rabbit_control_helper:command(force_reset, Node).
+
+forget_cluster_node(Node, Removee, RemoveWhenOffline) ->
+ rabbit_control_helper:command(forget_cluster_node, Node, [atom_to_list(Removee)],
+ [{"--offline", RemoveWhenOffline}]).
+
+forget_cluster_node(Node, Removee) ->
+ forget_cluster_node(Node, Removee, false).
+
+change_cluster_node_type(Node, Type) ->
+ rabbit_control_helper:command(change_cluster_node_type, Node, [atom_to_list(Type)]).
+
+update_cluster_nodes(Node, DiscoveryNode) ->
+ rabbit_control_helper:command(update_cluster_nodes, Node, [atom_to_list(DiscoveryNode)]).
+
+stop_join_start(Node, ClusterTo, Ram) ->
+ ok = stop_app(Node),
+ ok = join_cluster(Node, ClusterTo, Ram),
+ ok = start_app(Node).
+
+stop_join_start(Node, ClusterTo) ->
+ stop_join_start(Node, ClusterTo, false).
+
+stop_reset_start(Node) ->
+ ok = stop_app(Node),
+ ok = reset(Node),
+ ok = start_app(Node).
+
+declare(Ch, Name) ->
+ Res = amqp_channel:call(Ch, #'queue.declare'{durable = true,
+ queue = Name}),
+ amqp_channel:call(Ch, #'queue.bind'{queue = Name,
+ exchange = <<"amq.fanout">>}),
+ Res.
diff --git a/deps/rabbit/test/config_schema_SUITE.erl b/deps/rabbit/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..c538736897
--- /dev/null
+++ b/deps/rabbit/test/config_schema_SUITE.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbit, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
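+%% The snippets are evaluated on the broker node (node 0) via RPC rather
+%% than in the common_test process.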
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
diff --git a/deps/rabbit/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbit/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbit/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbit/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbit/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbit/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbit/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbit/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbit/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets b/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets
new file mode 100644
index 0000000000..c6ac600dcc
--- /dev/null
+++ b/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets
@@ -0,0 +1,797 @@
+[{internal_auth_backend,
+ "auth_backends.1 = internal",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_internal]}]}],
+ []},
+ {ldap_auth_backend,
+ "auth_backends.1 = ldap",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_ldap]}]}],
+ []},
+ {multiple_auth_backends,
+ "auth_backends.1 = ldap
+auth_backends.2 = internal",
+ [{rabbit,
+ [{auth_backends,
+ [rabbit_auth_backend_ldap,rabbit_auth_backend_internal]}]}],
+ []},
+ {full_name_auth_backend,
+ "auth_backends.1 = ldap
+# uses module name instead of a short alias, \"http\"
+auth_backends.2 = rabbit_auth_backend_http",
+ [{rabbit,
+ [{auth_backends,[rabbit_auth_backend_ldap,rabbit_auth_backend_http]}]}],
+ []},
+ {third_party_auth_backend,
+ "auth_backends.1.authn = internal
+# uses module name because this backend is from a 3rd party
+auth_backends.1.authz = rabbit_auth_backend_ip_range",
+ [{rabbit,
+ [{auth_backends,
+ [{rabbit_auth_backend_internal,rabbit_auth_backend_ip_range}]}]}],
+ []},
+ {authn_authz_backend,
+ "auth_backends.1.authn = ldap
+auth_backends.1.authz = internal",
+ [{rabbit,
+ [{auth_backends,
+ [{rabbit_auth_backend_ldap,rabbit_auth_backend_internal}]}]}],
+ []},
+ {authn_authz_multiple_backends,
+ "auth_backends.1.authn = ldap
+auth_backends.1.authz = internal
+auth_backends.2 = internal",
+ [{rabbit,
+ [{auth_backends,
+ [{rabbit_auth_backend_ldap,rabbit_auth_backend_internal},
+ rabbit_auth_backend_internal]}]}],
+ []},
+ {authn_backend_only,
+ "auth_backends.1.authn = ldap",
+ [{rabbit,
+ [{auth_backends,
+ [{rabbit_auth_backend_ldap,rabbit_auth_backend_ldap}]}]}],
+ []},
+ {ssl_options,
+ "ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.verify = verify_peer
+ssl_options.fail_if_no_peer_cert = true",
+ [{rabbit,
+ [{ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,true}]}]}],
+ []},
+ {tcp_listener,
+ "listeners.tcp.default = 5673",
+ [{rabbit,[{tcp_listeners,[5673]}]}],[]},
+ {ssl_listener,
+ "listeners.ssl = none",[{rabbit,[{ssl_listeners,[]}]}],[]},
+ {num_acceptors,
+ "num_acceptors.ssl = 1",[{rabbit,[{num_ssl_acceptors,1}]}],[]},
+
+ {socket_writer_gc_threshold,
+ "socket_writer.gc_threshold = 999666111", [{rabbit, [{writer_gc_threshold, 999666111}]}],[]},
+
+ {socket_writer_gc_threshold_off,
+ "socket_writer.gc_threshold = off", [{rabbit, [{writer_gc_threshold, undefined}]}],[]},
+
+ {default_user_settings,
+ "default_user = guest
+default_pass = guest
+default_user_tags.administrator = true
+default_permissions.configure = .*
+default_permissions.read = .*
+default_permissions.write = .*",
+ [{rabbit,
+ [{default_user,<<"guest">>},
+ {default_pass,<<"guest">>},
+ {default_user_tags,[administrator]},
+ {default_permissions,[<<".*">>,<<".*">>,<<".*">>]}]}],
+ []},
+ {cluster_formation,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1
+cluster_formation.classic_config.nodes.peer2 = rabbit@hostname2
+cluster_formation.node_type = disc",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_classic_config},
+ {node_type,disc}]},
+ {cluster_nodes,{[rabbit@hostname2,rabbit@hostname1],disc}}]}],
+ []},
+
+ {cluster_formation_module_classic_config_alias,
+ "cluster_formation.peer_discovery_backend = classic_config
+cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1
+cluster_formation.classic_config.nodes.peer2 = rabbit@hostname2",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_classic_config}]},
+ {cluster_nodes,{[rabbit@hostname2,rabbit@hostname1],disc}}]}],
+ []},
+
+ {cluster_formation_module_dns_alias,
+ "cluster_formation.peer_discovery_backend = dns
+cluster_formation.dns.hostname = discovery.eng.example.local",
+ [{rabbit,
+ [
+ {cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_dns},
+ {peer_discovery_dns, [
+ {hostname, <<"discovery.eng.example.local">>}
+ ]}]}
+ ]}],
+ []},
+
+ {cluster_formation_disk,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+ cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1
+ cluster_formation.classic_config.nodes.peer2 = rabbit@hostname2
+ cluster_formation.node_type = disk",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_classic_config},
+ {node_type,disc}]},
+ {cluster_nodes,{[rabbit@hostname2,rabbit@hostname1],disc}}]}],
+ []},
+ {cluster_formation_ram_ignored,
+ "cluster_formation.node_type = ram",[],[]},
+ {tcp_listen_options,
+ "tcp_listen_options.backlog = 128
+tcp_listen_options.nodelay = true
+tcp_listen_options.exit_on_close = false",
+ [{rabbit,
+ [{tcp_listen_options,
+ [{backlog,128},{nodelay,true},{exit_on_close,false}]}]}],
+ []},
+ {vm_memory_watermark_absolute,
+ "vm_memory_high_watermark.absolute = 1073741824",
+ [{rabbit,[{vm_memory_high_watermark,{absolute,1073741824}}]}],
+ []},
+ {vm_memory_watermark_absolute_units,
+ "vm_memory_high_watermark.absolute = 1024MB",
+ [{rabbit,[{vm_memory_high_watermark,{absolute,"1024MB"}}]}],
+ []},
+ {vm_memory_watermark_paging_ratio,
+ "vm_memory_high_watermark_paging_ratio = 0.75
+ vm_memory_high_watermark.relative = 0.4",
+ [{rabbit,
+ [{vm_memory_high_watermark_paging_ratio,0.75},
+ {vm_memory_high_watermark,0.4}]}],
+ []},
+ {memory_monitor_interval, "memory_monitor_interval = 5000",
+ [{rabbit,
+ [{memory_monitor_interval, 5000}]}],
+ []},
+ {vm_memory_calculation_strategy, "vm_memory_calculation_strategy = rss",
+ [{rabbit,
+ [{vm_memory_calculation_strategy, rss}]}],
+ []},
+ {vm_memory_calculation_strategy, "vm_memory_calculation_strategy = erlang",
+ [{rabbit,
+ [{vm_memory_calculation_strategy, erlang}]}],
+ []},
+ {vm_memory_calculation_strategy, "vm_memory_calculation_strategy = allocated",
+ [{rabbit,
+ [{vm_memory_calculation_strategy, allocated}]}],
+ []},
+ {vm_memory_calculation_strategy, "vm_memory_calculation_strategy = legacy",
+ [{rabbit,
+ [{vm_memory_calculation_strategy, legacy}]}],
+ []},
+ {total_memory_available_override_value,
+ "total_memory_available_override_value = 1000000000",
+ [{rabbit,[{total_memory_available_override_value, 1000000000}]}],
+ []},
+ {total_memory_available_override_value_units,
+ "total_memory_available_override_value = 1024MB",
+ [{rabbit,[{total_memory_available_override_value, "1024MB"}]}],
+ []},
+ {connection_max,
+ "connection_max = 999",
+ [{rabbit,[{connection_max, 999}]}],
+ []},
+ {connection_max,
+ "connection_max = infinity",
+ [{rabbit,[{connection_max, infinity}]}],
+ []},
+ {channel_max,
+ "channel_max = 16",
+ [{rabbit,[{channel_max, 16}]}],
+ []},
+ {max_message_size,
+ "max_message_size = 131072",
+ [{rabbit, [{max_message_size, 131072}]}],
+ []},
+ {listeners_tcp_ip,
+ "listeners.tcp.1 = 192.168.1.99:5672",
+ [{rabbit,[{tcp_listeners,[{"192.168.1.99",5672}]}]}],
+ []},
+ {listeners_tcp_ip_multiple,
+ "listeners.tcp.1 = 127.0.0.1:5672
+ listeners.tcp.2 = ::1:5672",
+ [{rabbit,[{tcp_listeners,[{"127.0.0.1",5672},{"::1",5672}]}]}],
+ []},
+ {listeners_tcp_ip_all,"listeners.tcp.1 = :::5672",
+ [{rabbit,[{tcp_listeners,[{"::",5672}]}]}],
+ []},
+ {listeners_tcp_ipv6,
+ "listeners.tcp.1 = fe80::2acf:e9ff:fe17:f97b:5672",
+ [{rabbit,[{tcp_listeners,[{"fe80::2acf:e9ff:fe17:f97b",5672}]}]}],
+ []},
+ {tcp_options_sndbuf,
+ "tcp_listen_options.backlog = 128
+ tcp_listen_options.nodelay = true
+ tcp_listen_options.sndbuf = 196608
+ tcp_listen_options.recbuf = 196608",
+ [{rabbit,
+ [{tcp_listen_options,
+ [{backlog,128},{nodelay,true},{sndbuf,196608},{recbuf,196608}]}]}],
+ []},
+ {tcp_listen_options_nodelay_with_kernel,
+ "tcp_listen_options.backlog = 4096
+ tcp_listen_options.nodelay = true",
+ [{kernel,
+ [{inet_default_connect_options,[{nodelay,true}]},
+ {inet_default_listen_options,[{nodelay,true}]}]}],
+ [{kernel,
+ [{inet_default_connect_options,[{nodelay,true}]},
+ {inet_default_listen_options,[{nodelay,true}]}]},
+ {rabbit,[{tcp_listen_options,[{backlog,4096},{nodelay,true}]}]}],
+ []},
+ {tcp_listen_options_nodelay,
+ "tcp_listen_options.backlog = 4096
+ tcp_listen_options.nodelay = true",
+ [{rabbit,[{tcp_listen_options,[{backlog,4096},{nodelay,true}]}]}],
+ []},
+ {ssl_handshake_timeout,
+ "ssl_handshake_timeout = 10000",
+ [{rabbit,[{ssl_handshake_timeout,10000}]}],
+ []},
+ {cluster_partition_handling_pause_if_all_down,
+ "cluster_partition_handling = pause_if_all_down
+
+ ## Recovery strategy. Can be either 'autoheal' or 'ignore'
+ cluster_partition_handling.pause_if_all_down.recover = ignore
+
+ ## Node names to check
+ cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@myhost1
+ cluster_partition_handling.pause_if_all_down.nodes.2 = rabbit@myhost2",
+ [{rabbit,
+ [{cluster_partition_handling,
+ {pause_if_all_down,[rabbit@myhost2,rabbit@myhost1],ignore}}]}],
+ []},
+ {cluster_partition_handling_autoheal,
+ "cluster_partition_handling = autoheal",
+ [{rabbit,[{cluster_partition_handling,autoheal}]}],
+ []},
+ {password_hashing,
+ "password_hashing_module = rabbit_password_hashing_sha512",
+ [{rabbit,[{password_hashing_module,rabbit_password_hashing_sha512}]}],
+ []},
+ {ssl_options_verify_peer,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_password,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.password = t0p$3kRe7",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {password,"t0p$3kRe7"}]}]}],
+ []},
+ {ssl_options_tls_ver_old,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.versions.tls1_2 = tlsv1.2
+ ssl_options.versions.tls1_1 = tlsv1.1
+ ssl_options.versions.tls1 = tlsv1",
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1',tlsv1]}]}],
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1',tlsv1]}]},
+ {rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {versions,['tlsv1.2','tlsv1.1',tlsv1]}]}]}],
+ []},
+ {ssl_options_tls_ver_new,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.versions.tls1_2 = tlsv1.2
+ ssl_options.versions.tls1_1 = tlsv1.1",
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1']}]}],
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1']}]},
+ {rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {versions,['tlsv1.2','tlsv1.1']}]}]}],
+ []},
+
+ {ssl_options_ciphers,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.versions.1 = tlsv1.2
+ ssl_options.versions.2 = tlsv1.1
+ ssl_options.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+ ssl_options.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
+ ssl_options.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
+ ssl_options.ciphers.4 = ECDHE-RSA-AES256-SHA384
+ ssl_options.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
+ ssl_options.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
+ ssl_options.ciphers.7 = ECDH-ECDSA-AES256-SHA384
+ ssl_options.ciphers.8 = ECDH-RSA-AES256-SHA384
+ ssl_options.ciphers.9 = DHE-RSA-AES256-GCM-SHA384",
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1']}]}],
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1']}]},
+ {rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {ciphers, [
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-AES256-SHA384",
+ "ECDHE-RSA-AES256-SHA384",
+ "ECDH-ECDSA-AES256-GCM-SHA384",
+ "ECDH-RSA-AES256-GCM-SHA384",
+ "ECDH-ECDSA-AES256-SHA384",
+ "ECDH-RSA-AES256-SHA384",
+ "DHE-RSA-AES256-GCM-SHA384"
+ ]},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {versions,['tlsv1.2','tlsv1.1']}]}]}],
+ []},
+
+ {ssl_options_allow_poodle,
+ "listeners.ssl.1 = 5671
+ ssl_allow_poodle_attack = true
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_allow_poodle_attack,true},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_depth,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.depth = 2
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,2},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_depth_0,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.depth = 0
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,0},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_depth_255,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.depth = 255
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,255},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_honor_cipher_order,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.depth = 2
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false
+ ssl_options.honor_cipher_order = true",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,2},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert, false},
+ {honor_cipher_order, true}]}]}],
+ []},
+ {ssl_options_honor_ecc_order,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.depth = 2
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false
+ ssl_options.honor_ecc_order = true",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,2},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert, false},
+ {honor_ecc_order, true}]}]}],
+ []},
+
+ {ssl_cert_login_from_cn,
+ "ssl_cert_login_from = common_name",
+ [{rabbit,[{ssl_cert_login_from, common_name}]}],
+ []},
+
+ {ssl_cert_login_from_dn,
+ "ssl_cert_login_from = distinguished_name",
+ [{rabbit,[{ssl_cert_login_from, distinguished_name}]}],
+ []},
+
+ {ssl_cert_login_from_san_dns,
+ "ssl_cert_login_from = subject_alternative_name
+ ssl_cert_login_san_type = dns
+ ssl_cert_login_san_index = 0",
+ [{rabbit,[
+ {ssl_cert_login_from, subject_alternative_name},
+ {ssl_cert_login_san_type, dns},
+ {ssl_cert_login_san_index, 0}
+ ]}],
+ []},
+
+ {tcp_listen_options_linger_on,
+ "tcp_listen_options.linger.on = true
+ tcp_listen_options.linger.timeout = 100",
+ [{rabbit,[{tcp_listen_options,[{linger,{true,100}}]}]}],
+ []},
+ {tcp_listen_options_linger_off,
+ "tcp_listen_options.linger.on = false
+ tcp_listen_options.linger.timeout = 100",
+ [{rabbit,[{tcp_listen_options,[{linger,{false,100}}]}]}],
+ []},
+ {tcp_listen_options_linger_on_notimeout,
+ "tcp_listen_options.linger.on = true",
+ [{rabbit,[{tcp_listen_options,[{linger,{true,0}}]}]}],
+ []},
+ {tcp_listen_options_linger_timeout,
+ "tcp_listen_options.linger.timeout = 100",
+ [{rabbit,[{tcp_listen_options,[{linger,{false,100}}]}]}],
+ []},
+
+ {cluster_formation_randomized_startup_delay_both_values,
+ "cluster_formation.randomized_startup_delay_range.min = 10
+ cluster_formation.randomized_startup_delay_range.max = 30",
+ [{rabbit, [{cluster_formation, [
+ {randomized_startup_delay_range, {10, 30}}
+ ]}]}],
+ []},
+
+ {cluster_formation_randomized_startup_delay_min_only,
+ "cluster_formation.randomized_startup_delay_range.min = 10",
+ [{rabbit, [{cluster_formation, [
+ {randomized_startup_delay_range, {10, 60}}
+ ]}]}],
+ []},
+
+ {cluster_formation_randomized_startup_delay_max_only,
+ "cluster_formation.randomized_startup_delay_range.max = 30",
+ [{rabbit, [{cluster_formation, [
+ {randomized_startup_delay_range, {5, 30}}
+ ]}]}],
+ []},
+
+ {cluster_formation_dns,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_dns
+ cluster_formation.dns.hostname = 192.168.0.2.xip.io
+ cluster_formation.node_type = disc",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_dns,[{hostname,<<"192.168.0.2.xip.io">>}]},
+ {peer_discovery_backend,rabbit_peer_discovery_dns},
+ {node_type,disc}]}]}],
+ []},
+ {cluster_formation_classic,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+ cluster_formation.node_type = disc",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_classic_config},
+ {node_type,disc}]}]}],
+ []},
+ {cluster_formation_classic_ram,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+ cluster_formation.node_type = ram",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_classic_config},
+ {node_type,ram}]}]}],
+ []},
+ {background_gc_enabled,
+ "background_gc_enabled = true
+ background_gc_target_interval = 30000",
+ [{rabbit,
+ [{background_gc_enabled,true},{background_gc_target_interval,30000}]}],
+ []},
+ {background_gc_disabled,
+ "background_gc_enabled = false
+ background_gc_target_interval = 30000",
+ [{rabbit,
+ [{background_gc_enabled,false},{background_gc_target_interval,30000}]}],
+ []},
+ {credential_validator_length,
+ "credential_validator.validation_backend = rabbit_credential_validator_min_password_length
+credential_validator.min_length = 10",
+ [{rabbit,
+ [{credential_validator,
+ [{validation_backend,
+ rabbit_credential_validator_min_password_length},
+ {min_length,10}]}]}],
+ []},
+ {credential_validator_regexp,
+ "credential_validator.validation_backend = rabbit_credential_validator_password_regexp
+credential_validator.regexp = ^abc\\d+",
+ [{rabbit,
+ [{credential_validator,
+ [{validation_backend,rabbit_credential_validator_password_regexp},
+ {regexp,"^abc\\d+"}]}]}],
+ []},
+ {proxy_protocol_on,
+ "proxy_protocol = true",
+ [{rabbit,[{proxy_protocol,true}]}],[]},
+ {proxy_protocol_off,
+ "proxy_protocol = false",
+ [{rabbit,[{proxy_protocol,false}]}],[]},
+ {log_debug_file,
+ "log.file.level = debug",
+ [{rabbit,[{log, [{file, [{level, debug}]}]}]}],
+ []},
+ {log_debug_console,
+ "log.console = true
+ log.console.level = debug",
+ [{rabbit,[{log, [{console, [{enabled, true}, {level, debug}]}]}]}],
+ []},
+ {log_debug_exchange,
+ "log.exchange = true
+ log.exchange.level = debug",
+ [{rabbit,[{log, [{exchange, [{enabled, true}, {level, debug}]}]}]}],
+ []},
+ {log_debug_syslog,
+ "log.syslog = true
+ log.syslog.level = debug",
+ [{rabbit,[{log, [{syslog, [{enabled, true}, {level, debug}]}]}]}],
+ []},
+ {log_file_name,
+ "log.file = file_name",
+ [{rabbit,[{log, [{file, [{file, "file_name"}]}]}]}],
+ []},
+ {log_file_disabled,
+ "log.file = false",
+ [{rabbit,[{log, [{file, [{file, false}]}]}]}],
+ []},
+ {log_category_level,
+ "log.connection.level = debug
+ log.channel.level = error",
+ [{rabbit,[{log, [{categories, [{connection, [{level, debug}]},
+ {channel, [{level, error}]}]}]}]}],
+ []},
+ {log_category_file,
+ "log.connection.file = file_name_connection
+ log.channel.file = file_name_channel",
+ [{rabbit,[{log, [{categories, [{connection, [{file, "file_name_connection"}]},
+ {channel, [{file, "file_name_channel"}]}]}]}]}],
+ []},
+
+ {default_worker_pool_size,
+ "default_worker_pool_size = 512",
+ [{rabbit, [
+ {default_worker_pool_size, 512}
+ ]}],
+ []},
+
+ {delegate_count,
+ "delegate_count = 64",
+ [{rabbit, [
+ {delegate_count, 64}
+ ]}],
+ []},
+
+ {kernel_net_ticktime,
+ "net_ticktime = 20",
+ [{kernel, [
+ {net_ticktime, 20}
+ ]}],
+ []},
+
+ {rabbit_consumer_timeout,
+ "consumer_timeout = 20000",
+ [{rabbit, [
+ {consumer_timeout, 20000}
+ ]}],
+ []},
+
+ {rabbit_msg_store_shutdown_timeout,
+ "message_store_shutdown_timeout = 600000",
+ [{rabbit, [
+ {msg_store_shutdown_timeout, 600000}
+ ]}],
+ []},
+
+ {rabbit_mnesia_table_loading_retry_timeout,
+ "mnesia_table_loading_retry_timeout = 45000",
+ [{rabbit, [
+ {mnesia_table_loading_retry_timeout, 45000}
+ ]}],
+ []},
+
+ {log_syslog_settings,
+ "log.syslog = true
+ log.syslog.identity = rabbitmq
+ log.syslog.facility = user
+ log.syslog.multiline_mode = true
+ log.syslog.ip = 10.10.10.10
+ log.syslog.port = 123",
+ [
+ {rabbit,[{log, [{syslog, [{enabled, true}]}]}]},
+ {syslog, [{app_name, "rabbitmq"},
+ {facility, user},
+ {multiline_mode, true},
+ {dest_host, "10.10.10.10"},
+ {dest_port, 123}]}
+ ],
+ []},
+ {log_syslog_tcp,
+ "log.syslog = true
+ log.syslog.transport = tcp
+ log.syslog.protocol = rfc5424
+ log.syslog.host = syslog.my-network.com",
+ [
+ {rabbit,[{log, [{syslog, [{enabled, true}]}]}]},
+ {syslog, [{protocol, {rfc5424, tcp}},
+ {dest_host, "syslog.my-network.com"}]}
+ ],
+ []},
+ {log_syslog_udp_default,
+ "log.syslog = true
+ log.syslog.protocol = rfc3164",
+ [
+ {rabbit,[{log, [{syslog, [{enabled, true}]}]}]},
+ {syslog, [{protocol, {rfc3164, udp}}]}
+ ],
+ []},
+ {log_syslog_tls,
+ "log.syslog = true
+ log.syslog.transport = tls
+ log.syslog.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ log.syslog.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ log.syslog.ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ log.syslog.ssl_options.verify = verify_peer
+ log.syslog.ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit, [{log, [{syslog, [{enabled, true}]}]}]},
+ {syslog, [{protocol, {rfc5424, tls,
+ [{verify,verify_peer},
+ {fail_if_no_peer_cert,false},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}]}}]}],
+ []},
+
+ %%
+ %% Definitions
+ %%
+
+ {definition_files, "load_definitions = test/definition_import_SUITE_data/case1.json",
+ [{rabbit,
+ [{load_definitions, "test/definition_import_SUITE_data/case1.json"}]}],
+ []},
+
+ %%
+ %% Raft
+ %%
+
+ {raft_data_dir,
+ "raft.data_dir = /data/rabbitmq/raft/log",
+ [{ra, [
+ {data_dir, "/data/rabbitmq/raft/log"}
+ ]}],
+ []},
+
+ {raft_segment_max_entries,
+ "raft.segment_max_entries = 65536",
+ [{ra, [
+ {segment_max_entries, 65536}
+ ]}],
+ []},
+
+ {raft_wal_max_size_bytes,
+ "raft.wal_max_size_bytes = 1048576",
+ [{ra, [
+ {wal_max_size_bytes, 1048576}
+ ]}],
+ []},
+
+ {raft_wal_max_batch_size,
+ "raft.wal_max_batch_size = 4096",
+ [{ra, [
+ {wal_max_batch_size, 4096}
+ ]}],
+ []},
+
+ {raft_snapshot_chunk_size,
+ "raft.snapshot_chunk_size = 1000000",
+ [{ra, [
+ {snapshot_chunk_size, 1000000}
+ ]}],
+ []}
+
+].
diff --git a/deps/rabbit/test/confirms_rejects_SUITE.erl b/deps/rabbit/test/confirms_rejects_SUITE.erl
new file mode 100644
index 0000000000..a51253885c
--- /dev/null
+++ b/deps/rabbit/test/confirms_rejects_SUITE.erl
@@ -0,0 +1,412 @@
+-module(confirms_rejects_SUITE).
+
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ OverflowTests = [
+ confirms_rejects_conflict,
+ policy_resets_to_default
+ ],
+ [
+ {parallel_tests, [parallel], [
+ {overflow_reject_publish_dlx, [parallel], OverflowTests},
+ {overflow_reject_publish, [parallel], OverflowTests},
+ dead_queue_rejects,
+ mixed_dead_alive_queues_reject
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+
+init_per_group(overflow_reject_publish, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish">>}
+ ]);
+init_per_group(overflow_reject_publish_dlx, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish-dlx">>}
+ ]);
+init_per_group(Group, Config) ->
+ ClusterSize = 2,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(overflow_reject_publish, _Config) ->
+ ok;
+end_per_group(overflow_reject_publish_dlx, _Config) ->
+ ok;
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(policy_resets_to_default = Testcase, Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ rabbit_ct_helpers:testcase_started(
+ rabbit_ct_helpers:set_config(Config, [{conn, Conn}]), Testcase);
+init_per_testcase(Testcase, Config)
+ when Testcase == confirms_rejects_conflict;
+ Testcase == dead_queue_rejects;
+ Testcase == mixed_dead_alive_queues_reject ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ Conn1 = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+
+ rabbit_ct_helpers:testcase_started(
+ rabbit_ct_helpers:set_config(Config, [{conn, Conn}, {conn1, Conn1}]),
+ Testcase).
+
+end_per_testcase(policy_resets_to_default = Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ XOverflow = ?config(overflow, Config),
+ QueueName = <<"policy_resets_to_default", "_", XOverflow/binary>>,
+ amqp_channel:call(Ch, #'queue.delete'{queue = QueueName}),
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, 0),
+
+ Conn = ?config(conn, Config),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(confirms_rejects_conflict = Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ XOverflow = ?config(overflow, Config),
+ QueueName = <<"confirms_rejects_conflict", "_", XOverflow/binary>>,
+ amqp_channel:call(Ch, #'queue.delete'{queue = QueueName}),
+ end_per_testcase0(Testcase, Config);
+end_per_testcase(dead_queue_rejects = Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"dead_queue_rejects">>}),
+ end_per_testcase0(Testcase, Config);
+end_per_testcase(mixed_dead_alive_queues_reject = Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"mixed_dead_alive_queues_reject_dead">>}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"mixed_dead_alive_queues_reject_alive">>}),
+ amqp_channel:call(Ch, #'exchange.delete'{exchange = <<"mixed_dead_alive_queues_reject">>}),
+ end_per_testcase0(Testcase, Config).
+
+end_per_testcase0(Testcase, Config) ->
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, 0),
+
+ Conn = ?config(conn, Config),
+ Conn1 = ?config(conn1, Config),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ rabbit_ct_client_helpers:close_connection(Conn1),
+
+ clean_acks_mailbox(),
+
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
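+%% Publish to a durable queue and wait for the ack, then kill the queue and
+%% expect further publishes to be nacked (see kill_queue_expect_nack/6).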
+dead_queue_rejects(Config) ->
+ Conn = ?config(conn, Config),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QueueName = <<"dead_queue_rejects">>,
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
+ durable = true}),
+
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = <<"HI">>}),
+
+ receive
+ {'basic.ack',_,_} -> ok
+ after 10000 ->
+ error(timeout_waiting_for_initial_ack)
+ end,
+
+ BasicPublish = #'basic.publish'{routing_key = QueueName},
+ AmqpMsg = #amqp_msg{payload = <<"HI">>},
+ kill_queue_expect_nack(Config, Ch, QueueName, BasicPublish, AmqpMsg, 5).
+
+mixed_dead_alive_queues_reject(Config) ->
+ Conn = ?config(conn, Config),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QueueNameDead = <<"mixed_dead_alive_queues_reject_dead">>,
+ QueueNameAlive = <<"mixed_dead_alive_queues_reject_alive">>,
+ ExchangeName = <<"mixed_dead_alive_queues_reject">>,
+
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueNameDead,
+ durable = true}),
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueNameAlive,
+ durable = true}),
+
+ amqp_channel:call(Ch, #'exchange.declare'{exchange = ExchangeName,
+ durable = true}),
+
+ amqp_channel:call(Ch, #'queue.bind'{exchange = ExchangeName,
+ queue = QueueNameAlive,
+ routing_key = <<"route">>}),
+
+ amqp_channel:call(Ch, #'queue.bind'{exchange = ExchangeName,
+ queue = QueueNameDead,
+ routing_key = <<"route">>}),
+
+ amqp_channel:call(Ch, #'basic.publish'{exchange = ExchangeName,
+ routing_key = <<"route">>},
+ #amqp_msg{payload = <<"HI">>}),
+
+ receive
+ {'basic.ack',_,_} -> ok;
+ {'basic.nack',_,_,_} -> error(expecting_ack_got_nack)
+ after 50000 ->
+ error({timeout_waiting_for_initial_ack, process_info(self(), messages)})
+ end,
+
+ BasicPublish = #'basic.publish'{exchange = ExchangeName, routing_key = <<"route">>},
+ AmqpMsg = #amqp_msg{payload = <<"HI">>},
+ kill_queue_expect_nack(Config, Ch, QueueNameDead, BasicPublish, AmqpMsg, 5).
+
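+%% Kill the queue, publish and expect a nack. If an ack arrives instead (the
+%% kill can race with queue recovery), retry up to Tries times before failing.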
+kill_queue_expect_nack(_Config, _Ch, _QueueName, _BasicPublish, _AmqpMsg, 0) ->
+ error(expecting_nack_got_ack);
+kill_queue_expect_nack(Config, Ch, QueueName, BasicPublish, AmqpMsg, Tries) ->
+ kill_the_queue(QueueName, Config),
+ amqp_channel:cast(Ch, BasicPublish, AmqpMsg),
+ R = receive
+ {'basic.nack',_,_,_} ->
+ ok;
+ {'basic.ack',_,_} ->
+ retry
+ after 10000 ->
+ error({timeout_waiting_for_nack, process_info(self(), messages)})
+ end,
+ case R of
+ ok ->
+ ok;
+ retry ->
+ kill_queue_expect_nack(Config, Ch, QueueName, BasicPublish, AmqpMsg, Tries - 1)
+ end.
+
+confirms_rejects_conflict(Config) ->
+ Conn = ?config(conn, Config),
+ Conn1 = ?config(conn1, Config),
+
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+
+ false = Conn =:= Conn1,
+ false = Ch =:= Ch1,
+
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ XOverflow = ?config(overflow, Config),
+ QueueName = <<"confirms_rejects_conflict", "_", XOverflow/binary>>,
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
+ durable = true,
+ arguments = [{<<"x-max-length">>, long, 12},
+ {<<"x-overflow">>, longstr, XOverflow}]
+ }),
+ %% Consume several messages at once (four basic.get casts). Do that often.
+ Consume = fun Consume() ->
+ receive
+ stop -> ok
+ after 1 ->
+ amqp_channel:cast(Ch1, #'basic.get'{queue = QueueName, no_ack = true}),
+ amqp_channel:cast(Ch1, #'basic.get'{queue = QueueName, no_ack = true}),
+ amqp_channel:cast(Ch1, #'basic.get'{queue = QueueName, no_ack = true}),
+ amqp_channel:cast(Ch1, #'basic.get'{queue = QueueName, no_ack = true}),
+ Consume()
+ end
+ end,
+
+ Produce = fun
+ Produce(0) -> ok;
+ Produce(N) ->
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = <<"HI">>}),
+ Produce(N - 1)
+ end,
+
+ %% Initial queue should be full
+ % Produce(20),
+
+ %% Consumer is a separate process.
+ Consumer = spawn(Consume),
+
+ %% A long run, intended to provoke races between confirms and rejects.
+ Produce(500000),
+
+ Result = validate_acks_mailbox(),
+
+ Consumer ! stop,
+ % Result.
+ case Result of
+ ok -> ok;
+ {error, E} -> error(E)
+ end.
+
+policy_resets_to_default(Config) ->
+ Conn = ?config(conn, Config),
+
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ XOverflow = ?config(overflow, Config),
+ QueueName = <<"policy_resets_to_default", "_", XOverflow/binary>>,
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
+ durable = true
+ }),
+ MaxLength = 2,
+ rabbit_ct_broker_helpers:set_policy(
+ Config, 0,
+ QueueName, QueueName, <<"queues">>,
+ [{<<"max-length">>, MaxLength}, {<<"overflow">>, XOverflow}]),
+
+ timer:sleep(1000),
+
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = <<"HI">>})
+ || _ <- lists:seq(1, MaxLength)],
+
+ assert_acks(MaxLength),
+
+ #'queue.declare_ok'{message_count = MaxLength} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
+ durable = true}),
+
+ RejectedMessage = <<"HI-rejected">>,
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = RejectedMessage}),
+
+ assert_nack(),
+
+ rabbit_ct_broker_helpers:set_policy(
+ Config, 0,
+ QueueName, QueueName, <<"queues">>,
+ [{<<"max-length">>, MaxLength}]),
+
+ NotRejectedMessage = <<"HI-not-rejected">>,
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = NotRejectedMessage}),
+
+ assert_ack(),
+
+ #'queue.declare_ok'{message_count = MaxLength} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
+ durable = true}),
+
+ Msgs = consume_all_messages(Ch, QueueName),
+ case {lists:member(RejectedMessage, Msgs), lists:member(NotRejectedMessage, Msgs)} of
+ {true, _} -> error({message_should_be_rejected, RejectedMessage});
+ {_, false} -> error({message_should_be_enqueued, NotRejectedMessage});
+ _ -> ok
+ end.
+
+consume_all_messages(Ch, QueueName) ->
+ consume_all_messages(Ch, QueueName, []).
+
+consume_all_messages(Ch, QueueName, Msgs) ->
+ case amqp_channel:call(Ch, #'basic.get'{queue = QueueName, no_ack = true}) of
+ {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} ->
+ consume_all_messages(Ch, QueueName, [Msg | Msgs]);
+ #'basic.get_empty'{} -> Msgs
+ end.
+
+assert_ack() ->
+ receive {'basic.ack', _, _} -> ok
+ after 10000 -> error(timeout_waiting_for_ack)
+ end,
+ clean_acks_mailbox().
+
+assert_nack() ->
+ receive {'basic.nack', _, _, _} -> ok
+ after 10000 -> error(timeout_waiting_for_nack)
+ end,
+ clean_acks_mailbox().
+
+assert_acks(N) ->
+ receive {'basic.ack', N, _} -> ok
+ after 10000 -> error({timeout_waiting_for_ack, N})
+ end,
+ clean_acks_mailbox().
+
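+%% Drain confirms from the mailbox and check that no ack/nack carries a
+%% delivery tag lower than the latest multiple confirm seen so far.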
+validate_acks_mailbox() ->
+ Result = validate_acks_mailbox({0, ok}),
+ clean_acks_mailbox(),
+ Result.
+
+validate_acks_mailbox({LatestMultipleN, LatestMultipleAck}) ->
+ Received = receive
+ {'basic.ack', N, Multiple} = A -> {N, Multiple, A};
+ {'basic.nack', N, Multiple, _} = A -> {N, Multiple, A}
+ after
+ 10000 -> none
+ end,
+ % ct:pal("Received ~p~n", [Received]),
+ case Received of
+ {LatestN, IsMultiple, AckOrNack} ->
+ case LatestN < LatestMultipleN of
+ true ->
+ {error, {received_ack_lower_than_latest_multiple, AckOrNack, smaller_than, LatestMultipleAck}};
+ false ->
+ case IsMultiple of
+ true -> validate_acks_mailbox({LatestN, AckOrNack});
+ false -> validate_acks_mailbox({LatestMultipleN, LatestMultipleAck})
+ end
+ end;
+ none -> ok
+ end.
+
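+%% Discard any leftover acks/nacks so they cannot leak into later assertions.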
+clean_acks_mailbox() ->
+ receive
+ {'basic.ack', _, _} -> clean_acks_mailbox();
+ {'basic.nack', _, _, _} -> clean_acks_mailbox()
+ after
+ 1000 -> done
+ end.
+
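+%% kill_the_queue/2 runs kill_the_queue/1 on the broker node via RPC;
+%% kill_the_queue/1 kills the queue process repeatedly and starts over if the
+%% queue is still alive (i.e. keeps being restarted).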
+kill_the_queue(QueueName, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, kill_the_queue, [QueueName]).
+
+kill_the_queue(QueueName) ->
+ [begin
+ {ok, Q} = rabbit_amqqueue:lookup({resource, <<"/">>, queue, QueueName}),
+ Pid = amqqueue:get_pid(Q),
+ ct:pal("~w killed", [Pid]),
+ timer:sleep(1),
+ exit(Pid, kill)
+ end
+ || _ <- lists:seq(1, 50)],
+ timer:sleep(1),
+ {ok, Q} = rabbit_amqqueue:lookup({resource, <<"/">>, queue, QueueName}),
+ Pid = amqqueue:get_pid(Q),
+ case is_process_alive(Pid) of
+ %% Try to kill it again
+ true -> kill_the_queue(QueueName);
+ false -> ok
+ end.
+
+flush() ->
+ receive
+ Any ->
+ ct:pal("flush ~p", [Any]),
+ flush()
+ after 0 ->
+ ok
+ end.
diff --git a/deps/rabbit/test/consumer_timeout_SUITE.erl b/deps/rabbit/test/consumer_timeout_SUITE.erl
new file mode 100644
index 0000000000..468714328d
--- /dev/null
+++ b/deps/rabbit/test/consumer_timeout_SUITE.erl
@@ -0,0 +1,262 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(consumer_timeout_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+-import(quorum_queue_utils, [wait_for_messages/2]).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ AllTests = [consumer_timeout,
+ consumer_timeout_basic_get,
+ consumer_timeout_no_basic_cancel_capability
+ ],
+ [
+ {parallel_tests, [],
+ [
+ {classic_queue, [parallel], AllTests},
+ {mirrored_queue, [parallel], AllTests},
+ {quorum_queue, [parallel], AllTests}
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 7}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(classic_queue, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, true}]);
+init_per_group(quorum_queue, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(mirrored_queue, Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
+ <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [{is_mirrored, true},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, true}]),
+ rabbit_ct_helpers:run_steps(Config1, []);
+init_per_group(Group, Config0) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 3,
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config0, {rabbit, [{channel_tick_interval, 1000},
+ {quorum_tick_interval, 1000},
+ {consumer_timeout, 5000}]}),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ rabbit_ct_helpers:run_steps(Config0, [])
+ end.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Q2 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_2", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, Q},
+ {queue_name_2, Q2}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name_2, Config)}),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
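+%% init_per_group/2 sets consumer_timeout to 5000 ms: leaving the delivered
+%% message unacked should make the server close the channel while the
+%% connection stays open.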
+consumer_timeout(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QName, false),
+ erlang:monitor(process, Conn),
+ erlang:monitor(process, Ch),
+ receive
+ {'DOWN', _, process, Ch, _} -> ok
+ after 30000 ->
+ flush(1),
+ exit(channel_exit_expected)
+ end,
+ receive
+ {'DOWN', _, process, Conn, _} ->
+ flush(1),
+ exit(unexpected_connection_exit)
+ after 2000 ->
+ ok
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consumer_timeout_basic_get(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [_DelTag] = consume(Ch, QName, [<<"msg1">>]),
+ erlang:monitor(process, Conn),
+ erlang:monitor(process, Ch),
+ receive
+ {'DOWN', _, process, Ch, _} -> ok
+ after 30000 ->
+ flush(1),
+ exit(channel_exit_expected)
+ end,
+ receive
+ {'DOWN', _, process, Conn, _} ->
+ flush(1),
+ exit(unexpected_connection_exit)
+ after 2000 ->
+ ok
+ end,
+ ok.
+
+
+-define(CLIENT_CAPABILITIES,
+ [{<<"publisher_confirms">>, bool, true},
+ {<<"exchange_exchange_bindings">>, bool, true},
+ {<<"basic.nack">>, bool, true},
+ {<<"consumer_cancel_notify">>, bool, false},
+ {<<"connection.blocked">>, bool, true},
+ {<<"authentication_failure_close">>, bool, true}]).
+
+consumer_timeout_no_basic_cancel_capability(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ Props = [{<<"capabilities">>, table, ?CLIENT_CAPABILITIES}],
+ AmqpParams = #amqp_params_network{port = Port,
+ host = "localhost",
+ virtual_host = <<"/">>,
+ client_properties = Props
+ },
+ {ok, Conn} = amqp_connection:start(AmqpParams),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ erlang:monitor(process, Conn),
+ erlang:monitor(process, Ch),
+ subscribe(Ch, QName, false),
+ receive
+ {#'basic.deliver'{delivery_tag = _,
+ redelivered = false}, _} ->
+ %% do nothing with the delivery; this should trigger the consumer timeout
+ ok
+ after 5000 ->
+ exit(deliver_timeout)
+ end,
+ receive
+ {'DOWN', _, process, Ch, _} -> ok
+ after 30000 ->
+ flush(1),
+ exit(channel_exit_expected)
+ end,
+ receive
+ {'DOWN', _, process, Conn, _} ->
+ flush(1),
+ exit(unexpected_connection_exit)
+ after 2000 ->
+ ok
+ end,
+ ok.
+%%%%%%%%%%%%%%%%%%%%%%%%
+%% Test helpers
+%%%%%%%%%%%%%%%%%%%%%%%%
+
+declare_queue(Ch, Config, QName) ->
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = Durable}).
+publish(Ch, QName, Payloads) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload})
+ || Payload <- Payloads].
+
+consume(Ch, QName, Payloads) ->
+ consume(Ch, QName, false, Payloads).
+
+consume(Ch, QName, NoAck, Payloads) ->
+ [begin
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = Payload}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName,
+ no_ack = NoAck}),
+ DTag
+ end || Payload <- Payloads].
+
+subscribe(Ch, Queue, NoAck) ->
+ subscribe(Ch, Queue, NoAck, <<"ctag">>).
+
+subscribe(Ch, Queue, NoAck, Ctag) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+ no_ack = NoAck,
+ consumer_tag = Ctag},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = Ctag} ->
+ ok
+ end.
+
+flush(T) ->
+ receive X ->
+ ct:pal("flushed ~w", [X]),
+ flush(T)
+ after T ->
+ ok
+ end.
diff --git a/deps/rabbit/test/crashing_queues_SUITE.erl b/deps/rabbit/test/crashing_queues_SUITE.erl
new file mode 100644
index 0000000000..cf88fb00f0
--- /dev/null
+++ b/deps/rabbit/test/crashing_queues_SUITE.erl
@@ -0,0 +1,267 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(crashing_queues_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_2}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ crashing_unmirrored,
+ crashing_mirrored,
+ give_up_after_repeated_crashes
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+crashing_unmirrored(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
+ QName = <<"crashing_unmirrored-q">>,
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ test_queue_failure(A, ChA, ConnB, 1, 0,
+ #'queue.declare'{queue = QName, durable = true}),
+ test_queue_failure(A, ChA, ConnB, 0, 0,
+ #'queue.declare'{queue = QName, durable = false}),
+ ok.
+
+crashing_mirrored(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<".*">>, <<"all">>),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
+ QName = <<"crashing_mirrored-q">>,
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ test_queue_failure(A, ChA, ConnB, 2, 1,
+ #'queue.declare'{queue = QName, durable = true}),
+ ok.
+
+test_queue_failure(Node, Ch, RaceConn, MsgCount, FollowerCount, Decl) ->
+ #'queue.declare_ok'{queue = QName} = amqp_channel:call(Ch, Decl),
+ try
+ publish(Ch, QName, transient),
+ publish(Ch, QName, durable),
+ Racer = spawn_declare_racer(RaceConn, Decl),
+ kill_queue(Node, QName),
+ assert_message_count(MsgCount, Ch, QName),
+ assert_follower_count(FollowerCount, Node, QName),
+ stop_declare_racer(Racer)
+ after
+ amqp_channel:call(Ch, #'queue.delete'{queue = QName})
+ end.
+
+give_up_after_repeated_crashes(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ ChB = rabbit_ct_client_helpers:open_channel(Config, B),
+ QName = <<"give_up_after_repeated_crashes-q">>,
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ amqp_channel:call(ChA, #'queue.declare'{queue = QName,
+ durable = true}),
+ await_state(A, QName, running),
+ publish(ChA, QName, durable),
+ kill_queue_hard(A, QName),
+ {'EXIT', _} = (catch amqp_channel:call(
+ ChA, #'queue.declare'{queue = QName,
+ durable = true})),
+ await_state(A, QName, crashed),
+ amqp_channel:call(ChB, #'queue.delete'{queue = QName}),
+ amqp_channel:call(ChB, #'queue.declare'{queue = QName,
+ durable = true}),
+ await_state(A, QName, running),
+
+ %% Since it's convenient, also test absent queue status here.
+ rabbit_ct_broker_helpers:stop_node(Config, B),
+ await_state(A, QName, down),
+ ok.
+
+
+publish(Ch, QName, DelMode) ->
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = QName},
+ Msg = #amqp_msg{props = #'P_basic'{delivery_mode = del_mode(DelMode)}},
+ amqp_channel:cast(Ch, Publish, Msg),
+ amqp_channel:wait_for_confirms(Ch).
+
+del_mode(transient) -> 1;
+del_mode(durable) -> 2.
+
+spawn_declare_racer(Conn, Decl) ->
+ Self = self(),
+ spawn_link(fun() -> declare_racer_loop(Self, Conn, Decl) end).
+
+stop_declare_racer(Pid) ->
+ Pid ! stop,
+ MRef = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', MRef, process, Pid, _} -> ok
+ end.
+
+declare_racer_loop(Parent, Conn, Decl) ->
+ receive
+ stop -> unlink(Parent)
+ after 0 ->
+ %% Catch here because we might happen to hit the queue
+ %% while it is in the middle of recovering and thus
+ %% explode with NOT_FOUND because it has crashed. That does not
+ %% matter: we are only in this loop to try to fool the recovery
+ %% code anyway.
+ try
+ case amqp_connection:open_channel(Conn) of
+ {ok, Ch} -> amqp_channel:call(Ch, Decl);
+ closing -> ok
+ end
+ catch
+ exit:_ ->
+ ok
+ end,
+ declare_racer_loop(Parent, Conn, Decl)
+ end.
+
+await_state(Node, QName, State) ->
+ await_state(Node, QName, State, 30000).
+
+await_state(Node, QName, State, Time) ->
+ case state(Node, QName) of
+ State ->
+ ok;
+ Other ->
+ case Time of
+ 0 -> exit({timeout_awaiting_state, State, Other});
+ _ -> timer:sleep(100),
+ await_state(Node, QName, State, Time - 100)
+ end
+ end.
+
+state(Node, QName) ->
+ V = <<"/">>,
+ Res = rabbit_misc:r(V, queue, QName),
+ Infos = rpc:call(Node, rabbit_amqqueue, info_all, [V, [name, state]]),
+ case Infos of
+ [] -> undefined;
+ [[{name, Res}, {state, State}]] -> State
+ end.
+
+kill_queue_hard(Node, QName) ->
+ case kill_queue(Node, QName) of
+ crashed -> ok;
+ _NewPid -> timer:sleep(100),
+ kill_queue_hard(Node, QName)
+ end.
+
+kill_queue(Node, QName) ->
+ Pid1 = queue_pid(Node, QName),
+ exit(Pid1, boom),
+ await_new_pid(Node, QName, Pid1).
+
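+%% Return the queue's pid. For a crashed queue, inspect its per-vhost
+%% supervisor: if a child is still present the queue is being restarted
+%% (return the old pid); if not, the supervisor has given up and we
+%% return 'crashed'.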
+queue_pid(Node, QName) ->
+ Q = lookup(Node, QName),
+ QPid = amqqueue:get_pid(Q),
+ State = amqqueue:get_state(Q),
+ #resource{virtual_host = VHost} = amqqueue:get_name(Q),
+ case State of
+ crashed ->
+ case rabbit_amqqueue_sup_sup:find_for_vhost(VHost, Node) of
+ {error, {queue_supervisor_not_found, _}} -> {error, no_sup};
+ {ok, SPid} ->
+ case sup_child(Node, SPid) of
+ {ok, _} -> QPid; %% restarting
+ {error, no_child} -> crashed %% given up
+ end
+ end;
+ _ -> QPid
+ end.
+
+sup_child(Node, Sup) ->
+ case rpc:call(Node, supervisor2, which_children, [Sup]) of
+ [{_, Child, _, _}] -> {ok, Child};
+ [] -> {error, no_child};
+ {badrpc, {'EXIT', {noproc, _}}} -> {error, no_sup}
+ end.
+
+lookup(Node, QName) ->
+ {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+ [rabbit_misc:r(<<"/">>, queue, QName)]),
+ Q.
+
+await_new_pid(Node, QName, OldPid) ->
+ case queue_pid(Node, QName) of
+ OldPid -> timer:sleep(10),
+ await_new_pid(Node, QName, OldPid);
+ New -> New
+ end.
+
+assert_message_count(Count, Ch, QName) ->
+ #'queue.declare_ok'{message_count = Count} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ passive = true}).
+
+assert_follower_count(Count, Node, QName) ->
+ Q = lookup(Node, QName),
+ [{_, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [slave_pids]]),
+ RealCount = case Pids of
+ '' -> 0;
+ _ -> length(Pids)
+ end,
+ case RealCount of
+ Count ->
+ ok;
+ _ when RealCount < Count ->
+ timer:sleep(10),
+ assert_follower_count(Count, Node, QName);
+ _ ->
+ exit({too_many_replicas, Count, RealCount})
+ end.
diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl
new file mode 100644
index 0000000000..4ee917aa21
--- /dev/null
+++ b/deps/rabbit/test/dead_lettering_SUITE.erl
@@ -0,0 +1,1174 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+%% For the full spec see: https://www.rabbitmq.com/dlx.html
+%%
+-module(dead_lettering_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-import(quorum_queue_utils, [wait_for_messages/2]).
+
+all() ->
+ [
+ {group, dead_letter_tests}
+ ].
+
+groups() ->
+ DeadLetterTests = [dead_letter_nack,
+ dead_letter_multiple_nack,
+ dead_letter_nack_requeue,
+ dead_letter_nack_requeue_multiple,
+ dead_letter_reject,
+ dead_letter_reject_requeue,
+ dead_letter_max_length_drop_head,
+ dead_letter_missing_exchange,
+ dead_letter_routing_key,
+ dead_letter_routing_key_header_CC,
+ dead_letter_routing_key_header_BCC,
+ dead_letter_routing_key_cycle_max_length,
+ dead_letter_routing_key_cycle_with_reject,
+ dead_letter_policy,
+ dead_letter_override_policy,
+ dead_letter_ignore_policy,
+ dead_letter_headers,
+ dead_letter_headers_reason_maxlen,
+ dead_letter_headers_cycle,
+ dead_letter_headers_BCC,
+ dead_letter_headers_CC,
+ dead_letter_headers_CC_with_routing_key,
+ dead_letter_headers_first_death],
+ Opts = [],
+ [
+ {dead_letter_tests, [],
+ [
+ {classic_queue, Opts, DeadLetterTests ++ [dead_letter_ttl,
+ dead_letter_max_length_reject_publish_dlx,
+ dead_letter_routing_key_cycle_ttl,
+ dead_letter_headers_reason_expired,
+ dead_letter_headers_reason_expired_per_message]},
+ {mirrored_queue, Opts, DeadLetterTests ++ [dead_letter_ttl,
+ dead_letter_max_length_reject_publish_dlx,
+ dead_letter_routing_key_cycle_ttl,
+ dead_letter_headers_reason_expired,
+ dead_letter_headers_reason_expired_per_message]},
+ {quorum_queue, Opts, DeadLetterTests}
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 8}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(classic_queue, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, false}]);
+init_per_group(quorum_queue, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(mirrored_queue, Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
+ <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [{is_mirrored, true},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, false}]),
+ rabbit_ct_helpers:run_steps(Config1, []);
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 3,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ rabbit_ct_helpers:run_steps(Config, [])
+ end.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Q2 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_2", [Group, Testcase])),
+ Policy = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_policy", [Group, Testcase])),
+ DLXExchange = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_dlx_exchange",
+ [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{dlx_exchange, DLXExchange},
+ {queue_name, Q},
+ {queue_name_dlx, Q2},
+ {policy, Policy}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name_dlx, Config)}),
+ amqp_channel:call(Ch, #'exchange.delete'{exchange = ?config(dlx_exchange, Config)}),
+ _ = rabbit_ct_broker_helpers:clear_policy(Config, 0, ?config(policy, Config)),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Dead letter exchanges
+%%
+%% Messages are dead-lettered when:
+%% 1) message is rejected with basic.reject or basic.nack with requeue=false
+%% 2) message ttl expires (not implemented in quorum queues)
+%% 3) queue length limit is exceeded (only drop-head implemented in quorum queues)
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
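+%% The tests below drive dead lettering through the declare_dead_letter_queues/4,5
+%% helpers defined at the end of this module. Purely as an illustration (this
+%% function is not called by any test, and the queue/exchange names are made up),
+%% the client-side declarations involved boil down to:
+dlx_declare_sketch(Ch) ->
+ %% declare the dead-letter exchange
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Ch, #'exchange.declare'{exchange = <<"my-dlx">>}),
+ %% declare the source queue with dead-letter arguments
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch, #'queue.declare'{
+ queue = <<"source-q">>,
+ arguments = [{<<"x-dead-letter-exchange">>, longstr, <<"my-dlx">>},
+ {<<"x-dead-letter-routing-key">>, longstr, <<"my-dlq">>}]}),
+ %% declare and bind the queue that will receive dead-lettered messages
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"my-dlq">>}),
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Ch, #'queue.bind'{queue = <<"my-dlq">>,
+ exchange = <<"my-dlx">>,
+ routing_key = <<"my-dlq">>}).
+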
+%% 1) message is rejected with basic.nack, requeue=false and multiple=false
+dead_letter_nack(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ %% Consume them
+ [DTag1, DTag2, DTag3] = consume(Ch, QName, [P1, P2, P3]),
+ %% Nack the last one with multiple = false
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag3,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ %% Queue is empty
+ consume_empty(Ch, QName),
+ %% Consume the last message from the dead letter queue
+ consume(Ch, DLXQName, [P3]),
+ consume_empty(Ch, DLXQName),
+ %% Nack the other two
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = false,
+ requeue = false}),
+ %% Queue is empty
+ consume_empty(Ch, QName),
+ %% Consume the first two messages from the dead letter queue
+ consume(Ch, DLXQName, [P1, P2]),
+ consume_empty(Ch, DLXQName).
+
+%% 1) message is rejected with basic.nack, requeue=false and multiple=true
+dead_letter_multiple_nack(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ %% Consume them
+ [_, _, DTag3] = consume(Ch, QName, [P1, P2, P3]),
+ %% Nack the last one with multiple = true
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag3,
+ multiple = true,
+ requeue = false}),
+ wait_for_messages(Config, [[DLXQName, <<"3">>, <<"3">>, <<"0">>]]),
+ %% Consume the 3 messages from the dead letter queue
+ consume(Ch, DLXQName, [P1, P2, P3]),
+ consume_empty(Ch, DLXQName),
+ %% Queue is empty
+ consume_empty(Ch, QName).
+
+%% 1) message is rejected with basic.nack, requeue=true and multiple=false. Dead-lettering does not take place
+dead_letter_nack_requeue(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume them
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [_, _, DTag3] = consume(Ch, QName, [P1, P2, P3]),
+ %% Queue is empty
+ consume_empty(Ch, QName),
+ %% Nack the last one with multiple = false
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag3,
+ multiple = false,
+ requeue = true}),
+ %% Consume the last message from the queue
+ wait_for_messages(Config, [[QName, <<"3">>, <<"1">>, <<"2">>]]),
+ consume(Ch, QName, [P3]),
+ consume_empty(Ch, QName),
+ %% Dead letter queue is empty
+ consume_empty(Ch, DLXQName).
+
+%% 1) message is rejected with basic.nack, requeue=true and multiple=true. Dead-lettering does not take place
+dead_letter_nack_requeue_multiple(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume them
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [_, _, DTag3] = consume(Ch, QName, [P1, P2, P3]),
+ %% Queue is empty
+ consume_empty(Ch, QName),
+ %% Nack the last one with multiple = true
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag3,
+ multiple = true,
+ requeue = true}),
+ %% Consume the three messages from the queue
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ consume(Ch, QName, [P1, P2, P3]),
+ consume_empty(Ch, QName),
+ %% Dead letter queue is empty
+ consume_empty(Ch, DLXQName).
+
+%% 1) message is rejected with basic.reject, requeue=false
+dead_letter_reject(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume the first message
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P1]),
+ %% Reject it
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag,
+ requeue = false}),
+ %% Consume it from the dead letter queue
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ _ = consume(Ch, DLXQName, [P1]),
+ consume_empty(Ch, DLXQName),
+ %% Consume the last two from the queue
+ _ = consume(Ch, QName, [P2, P3]),
+ consume_empty(Ch, QName).
+
+%% 1) Message is rejected with basic.reject, requeue=true. Dead-lettering does not take place.
+dead_letter_reject_requeue(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume the first one
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P1]),
+ %% Reject the first one
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag,
+ requeue = true}),
+ %% Consume the three messages from the queue
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ _ = consume(Ch, QName, [P1, P2, P3]),
+ consume_empty(Ch, QName),
+ %% Dead letter is empty
+ consume_empty(Ch, DLXQName).
+
+%% 2) Message ttl expires
+dead_letter_ttl(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName, [{<<"x-message-ttl">>, long, 1}]),
+
+ %% Publish message
+ P1 = <<"msg1">>,
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume_empty(Ch, QName),
+ [_] = consume(Ch, DLXQName, [P1]).
+
+%% 3) The queue length limit is exceeded; the dropped message is dead-lettered.
+%% Default strategy: drop-head
+dead_letter_max_length_drop_head(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName, [{<<"x-max-length">>, long, 1}]),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume the last one from the queue (max-length = 1)
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ _ = consume(Ch, QName, [P3]),
+ consume_empty(Ch, QName),
+ %% Consume the dropped ones from the dead letter queue
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"2">>, <<"0">>]]),
+ _ = consume(Ch, DLXQName, [P1, P2]),
+ consume_empty(Ch, DLXQName).
+
+%% Another strategy: reject-publish-dlx
+dead_letter_max_length_reject_publish_dlx(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName,
+ [{<<"x-max-length">>, long, 1},
+ {<<"x-overflow">>, longstr, <<"reject-publish-dlx">>}]),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume the first one from the queue (max-length = 1)
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ _ = consume(Ch, QName, [P1]),
+ consume_empty(Ch, QName),
+ %% Consume the dropped ones from the dead letter queue
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"2">>, <<"0">>]]),
+ _ = consume(Ch, DLXQName, [P2, P3]),
+ consume_empty(Ch, DLXQName).
+
+%% The dead letter exchange does not have to be declared when the queue is declared, but it
+%% should exist by the time messages need to be dead-lettered; if it is missing at that point,
+%% the messages will be silently dropped.
+dead_letter_missing_exchange(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ DLXExchange = <<"dlx-exchange-2">>,
+ #'exchange.delete_ok'{} = amqp_channel:call(Ch, #'exchange.delete'{exchange = DLXExchange}),
+
+ DeadLetterArgs = [{<<"x-max-length">>, long, 1},
+ {<<"x-dead-letter-exchange">>, longstr, DLXExchange},
+ {<<"x-dead-letter-routing-key">>, longstr, DLXQName}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+
+ %% Publish one message
+ publish(Ch, QName, [P1]),
+ %% Consume it
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P1]),
+ %% Reject it
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ %% Message is not in the dead letter queue (exchange does not exist)
+ consume_empty(Ch, DLXQName),
+
+ %% Declare the dead-letter exchange
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ %% Publish another message
+ publish(Ch, QName, [P2]),
+ %% Consume it
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag2] = consume(Ch, QName, [P2]),
+ %% Reject it
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag2,
+ requeue = false}),
+ %% Consume the rejected message from the dead letter queue
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P2}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ consume_empty(Ch, DLXQName).
+
+%%
+%% ROUTING
+%%
+%% Dead-lettered messages are routed to their dead letter exchange either
+%% with the routing key specified for the queue they were on or,
+%% if this was not set, (3) with the same routing keys they were originally published with;
+%% (4) this includes routing keys added by the CC and BCC headers.
+%%
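+%% In terms of queue arguments, and using made-up names, the two behaviours
+%% correspond roughly to:
+%%
+%% %% dead-letter with a fixed key:
+%% [{<<"x-dead-letter-exchange">>, longstr, <<"my-dlx">>},
+%% {<<"x-dead-letter-routing-key">>, longstr, <<"my-dlq">>}]
+%%
+%% %% dead-letter with the original routing keys (plus CC/BCC):
+%% [{<<"x-dead-letter-exchange">>, longstr, <<"my-dlx">>}]
+%%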
+%% 3) All previous tests used a specific key, test the original ones now.
+dead_letter_routing_key(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use a specific key
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+
+ %% Publish, consume and nack the first message
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Both queues are empty as the message could not be routed by the DLX exchange
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ consume_empty(Ch, QName),
+ consume_empty(Ch, DLXQName),
+ %% Bind the dlx queue with the original queue routing key
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = QName}),
+ %% Publish, consume and nack the second message
+ publish(Ch, QName, [P2]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag2] = consume(Ch, QName, [P2]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = false,
+ requeue = false}),
+ %% Message can now be routed using the recently bound key
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume(Ch, DLXQName, [P2]),
+ consume_empty(Ch, QName).
+
+
+%% 4a) If a specific routing key was not set for the queue, use routing keys added by the
+%% CC and BCC headers
+dead_letter_routing_key_header_CC(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use a specific key
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ CCHeader = {<<"CC">>, array, [{longstr, DLXQName}]},
+
+ %% Publish, consume and nack two messages, one with CC header
+ publish(Ch, QName, [P1]),
+ publish(Ch, QName, [P2], [CCHeader]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ [_, DTag2] = consume(Ch, QName, [P1, P2]),
+ %% P2 is also published to the DLX queue because of the binding to the default exchange
+ [_] = consume(Ch, DLXQName, [P2]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = true,
+ requeue = false}),
+ %% The second message should have been routed using the CC header
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]),
+ consume_empty(Ch, QName),
+ consume(Ch, DLXQName, [P2]),
+ consume_empty(Ch, DLXQName).
+
+%% 4b) If a specific routing key was not set for the queue, use routing keys added by the
+%% CC and BCC headers
+dead_letter_routing_key_header_BCC(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use a specific key
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ BCCHeader = {<<"BCC">>, array, [{longstr, DLXQName}]},
+
+ %% Publish, consume and nack two messages, one with BCC header
+ publish(Ch, QName, [P1]),
+ publish(Ch, QName, [P2], [BCCHeader]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ [_, DTag2] = consume(Ch, QName, [P1, P2]),
+ %% P2 is also published to the DLX queue because of the binding to the default exchange
+ [_] = consume(Ch, DLXQName, [P2]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = true,
+ requeue = false}),
+ %% The second message should have been routed using the BCC header
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]),
+ consume_empty(Ch, QName),
+ consume(Ch, DLXQName, [P2]),
+ consume_empty(Ch, DLXQName).
+
+%% It is possible to form a cycle of message dead-lettering. For instance,
+%% this can happen when a queue dead-letters messages to the default exchange without
+%% specifying a dead-letter routing key (5). Messages in such cycles (i.e. messages that
+%% reach the same queue twice) will be dropped if there were no rejections in the entire
+%% cycle, i.e. when every dead-lettering was caused by x-message-ttl (7) or x-max-length (6).
+%%
+%% 6) Message is dead lettered due to queue length limit, and then dropped by the broker as it is
+%% republished to the same queue.
+dead_letter_routing_key_cycle_max_length(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ DeadLetterArgs = [{<<"x-max-length">>, long, 1},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+
+ %% Publish messages, consume and acknowledge the second one (x-max-length = 1)
+ publish(Ch, QName, [P1, P2]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P2]),
+ consume_empty(Ch, QName),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+ %% Queue is empty, P1 has not been republished in a loop
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ consume_empty(Ch, QName).
+
+%% 7) Message is dead lettered due to message ttl. Not yet implemented in quorum queues
+dead_letter_routing_key_cycle_ttl(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ DeadLetterArgs = [{<<"x-message-ttl">>, long, 1},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+
+ %% Publish messages
+ publish(Ch, QName, [P1, P2]),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ consume_empty(Ch, QName).
+
+%% 5) Messages keep being republished because the cycle contains manual rejections
+dead_letter_routing_key_cycle_with_reject(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, <<>>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+
+ P = <<"msg1">>,
+
+ %% Publish message
+ publish(Ch, QName, [P]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Message is being republished
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [_] = consume(Ch, QName, [P]).
+
+%%
+%% For any given queue, a DLX can be defined by clients using the queue's arguments,
+%% or on the server using policies (8). In the case where both a policy and arguments specify a DLX,
+%% the one specified in the arguments overrules the one specified in the policy (9).
+%%
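+%% For reference, an equivalent policy can also be set from the command line
+%% (policy, queue and exchange names here are made up):
+%%
+%% rabbitmqctl set_policy my-dlx-policy "^source-q$" \
+%% '{"dead-letter-exchange":"my-dlx","dead-letter-routing-key":"my-dlq"}' \
+%% --apply-to queues
+%%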
+%% 8) Use server policies
+dead_letter_policy(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use arguments
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = Args,
+ durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName,
+ durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+
+ %% Publish 2 messages
+ publish(Ch, QName, [P1, P2]),
+ %% Consume them
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ [DTag1, DTag2] = consume(Ch, QName, [P1, P2]),
+ %% Nack the first one with multiple = false
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Only one unacked message left in the queue
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ consume_empty(Ch, QName),
+ consume_empty(Ch, DLXQName),
+
+ %% Set a policy
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?config(policy, Config), QName,
+ <<"queues">>,
+ [{<<"dead-letter-exchange">>, DLXExchange},
+ {<<"dead-letter-routing-key">>, DLXQName}]),
+ timer:sleep(1000),
+ %% Nack the second message
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = false,
+ requeue = false}),
+ %% Queue is empty
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ consume_empty(Ch, QName),
+ %% Consume the message from the dead letter queue
+ consume(Ch, DLXQName, [P2]),
+ consume_empty(Ch, DLXQName).
+
+%% 9) Argument overrides server policy
+dead_letter_override_policy(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+
+ %% Set a policy; it creates a cycle, but the message will be republished on nack.
+ %% Good enough for this test.
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?config(policy, Config), QName,
+ <<"queues">>,
+ [{<<"dead-letter-exchange">>, <<>>},
+ {<<"dead-letter-routing-key">>, QName}]),
+
+ %% Declared arguments override the policy and route dead-lettered messages to the DLX queue
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Queue is empty
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume_empty(Ch, QName),
+ [_] = consume(Ch, DLXQName, [P1]).
+
+%% 9) Policy is set after a queue has been declared with dead letter arguments. The policy
+%% will be overridden/ignored.
+dead_letter_ignore_policy(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ %% Set a policy
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?config(policy, Config), QName,
+ <<"queues">>,
+ [{<<"dead-letter-exchange">>, <<>>},
+ {<<"dead-letter-routing-key">>, QName}]),
+
+ P1 = <<"msg1">>,
+
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Message is in the dead letter queue, original queue is empty
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ [_] = consume(Ch, DLXQName, [P1]),
+ consume_empty(Ch, QName).
+
+%%
+%% HEADERS
+%%
+%% The dead-lettering process adds a header array named x-death (10) to each dead-lettered
+%% message. This array contains an entry for each dead-lettering event with the fields:
+%% queue, reason, time, exchange, routing-keys, count, and
+%% original-expiration (14) (if the message was dead-lettered due to per-message TTL).
+%% New entries are prepended to the beginning of the x-death array.
+%% Reason is one of the following: rejected (11), expired (12), maxlen (13)
+%%
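+%% Seen through the Erlang client, a freshly dead-lettered message carries a
+%% header roughly of the following shape (values are illustrative and the
+%% field order may differ):
+%%
+%% {<<"x-death">>, array,
+%% [{table, [{<<"count">>, long, 1},
+%% {<<"reason">>, longstr, <<"rejected">>},
+%% {<<"queue">>, longstr, <<"source-q">>},
+%% {<<"time">>, timestamp, 1571133448},
+%% {<<"exchange">>, longstr, <<>>},
+%% {<<"routing-keys">>, array, [{longstr, <<"source-q">>}]}]}]}
+%%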
+%% 10) and 11) Check all x-death headers, reason rejected
+dead_letter_headers(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ %% Publish and nack a message
+ P1 = <<"msg1">>,
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Consume and check headers
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>),
+ ?assertEqual({longstr, QName}, rabbit_misc:table_lookup(Death, <<"queue">>)),
+ ?assertEqual({longstr, <<"rejected">>}, rabbit_misc:table_lookup(Death, <<"reason">>)),
+ ?assertMatch({timestamp, _}, rabbit_misc:table_lookup(Death, <<"time">>)),
+ ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death, <<"exchange">>)),
+ ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death, <<"count">>)),
+ ?assertEqual({array, [{longstr, QName}]}, rabbit_misc:table_lookup(Death, <<"routing-keys">>)).
+
+%% 12) Per-queue message ttl has expired
+dead_letter_headers_reason_expired(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName, [{<<"x-message-ttl">>, long, 1}]),
+
+ %% Publish a message
+ P1 = <<"msg1">>,
+ publish(Ch, QName, [P1]),
+ %% Consume and check headers
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>),
+ ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Death, <<"reason">>)),
+ ?assertMatch(undefined, rabbit_misc:table_lookup(Death, <<"original-expiration">>)).
+
+%% 14) Per-message TTL has expired, original-expiration is added to x-death array
+dead_letter_headers_reason_expired_per_message(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ %% Publish a message
+ P1 = <<"msg1">>,
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName},
+ #amqp_msg{payload = P1,
+ props = #'P_basic'{expiration = <<"1">>}}),
+ %% publish another message to ensure the queue performs message expirations
+ publish(Ch, QName, [<<"msg2">>]),
+ %% Consume and check headers
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>),
+ ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Death, <<"reason">>)),
+ ?assertMatch({longstr, <<"1">>}, rabbit_misc:table_lookup(Death, <<"original-expiration">>)).
+
+%% 13) Message dead-lettered with maxlen reason (queue length limit exceeded)
+dead_letter_headers_reason_maxlen(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName, [{<<"x-max-length">>, long, 1}]),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ publish(Ch, QName, [P1, P2]),
+ %% Consume and check reason header
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>),
+ ?assertEqual({longstr, <<"maxlen">>}, rabbit_misc:table_lookup(Death, <<"reason">>)).
+
+%% If x-death already contains an entry with the same queue and dead-lettering reason,
+%% its count field is incremented and the entry is moved to the beginning of the array.
+dead_letter_headers_cycle(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, <<>>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+
+ P = <<"msg1">>,
+
+ %% Publish message
+ publish(Ch, QName, [P]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P,
+ props = #'P_basic'{headers = Headers1}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {array, [{table, Death1}]} = rabbit_misc:table_lookup(Headers1, <<"x-death">>),
+ ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death1, <<"count">>)),
+
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Message is being republished
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P,
+ props = #'P_basic'{headers = Headers2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {array, [{table, Death2}]} = rabbit_misc:table_lookup(Headers2, <<"x-death">>),
+ ?assertEqual({long, 2}, rabbit_misc:table_lookup(Death2, <<"count">>)).
+
+%% Dead-lettering a message modifies its headers:
+%% the exchange name is replaced with that of the latest dead-letter exchange,
+%% the routing key may be replaced with that specified in the queue performing dead lettering;
+%% if the above happens, the CC header will also be removed (15), and
+%% the BCC header will be removed as per sender-selected distribution (16)
+%%
+%% CC header is kept if no dead lettering routing key is provided
+dead_letter_headers_CC(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use a specific dead-letter routing key; the CC header is kept
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ CCHeader = {<<"CC">>, array, [{longstr, DLXQName}]},
+ publish(Ch, QName, [P1], [CCHeader]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ %% Message is published to both queues because of CC header and DLX queue bound to both
+ %% exchanges
+ {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers1}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ %% We check the headers to ensure no dead lettering has happened
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers1, <<"x-death">>)),
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers2, <<"x-death">>)),
+
+ %% Nack the message so it now gets dead lettered
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers3}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ consume_empty(Ch, QName),
+ ?assertEqual({array, [{longstr, DLXQName}]}, rabbit_misc:table_lookup(Headers3, <<"CC">>)),
+ ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"x-death">>)).
+
+%% 15) CC header is removed when routing key is specified
+dead_letter_headers_CC_with_routing_key(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Use a specific dead-letter routing key; the CC header will be removed
+ DeadLetterArgs = [{<<"x-dead-letter-routing-key">>, longstr, DLXQName},
+ {<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ CCHeader = {<<"CC">>, array, [{longstr, DLXQName}]},
+ publish(Ch, QName, [P1], [CCHeader]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ %% Message is published to both queues because of CC header and DLX queue bound to both
+ %% exchanges
+ {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers1}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ %% We check the headers to ensure no dead lettering has happened
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers1, <<"x-death">>)),
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers2, <<"x-death">>)),
+
+ %% Nack the message so it now gets dead lettered
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers3}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ consume_empty(Ch, QName),
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers3, <<"CC">>)),
+ ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"x-death">>)).
+
+%% 16) the BCC header will always be removed
+dead_letter_headers_BCC(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use a specific key for dead lettering
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ BCCHeader = {<<"BCC">>, array, [{longstr, DLXQName}]},
+ publish(Ch, QName, [P1], [BCCHeader]),
+ %% Message is published to both queues because of BCC header and DLX queue bound to both
+ %% exchanges
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers1}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ %% We check the headers to ensure no dead lettering has happened
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers1, <<"x-death">>)),
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers2, <<"x-death">>)),
+
+ %% Nack the message so it now gets dead lettered
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers3}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ consume_empty(Ch, QName),
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers3, <<"BCC">>)),
+ ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"x-death">>)).
+
+
+%% Three top-level headers are added for the very first dead-lettering event. They are
+%% x-first-death-reason, x-first-death-queue and x-first-death-exchange.
+%% They have the same values as the reason, queue, and exchange fields of the
+%% original dead-lettering event. Once added, these headers are never modified.
+dead_letter_headers_first_death(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Let's create a small dead-lettering loop QName -> DLXQName -> QName
+ DeadLetterArgs = [{<<"x-dead-letter-routing-key">>, longstr, DLXQName},
+ {<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ DLXDeadLetterArgs = [{<<"x-dead-letter-routing-key">>, longstr, QName},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable, arguments = DLXDeadLetterArgs}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+
+ %% Publish and nack a message
+ P1 = <<"msg1">>,
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Consume and check headers
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{delivery_tag = DTag2}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ ?assertEqual({longstr, <<"rejected">>},
+ rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)),
+ ?assertEqual({longstr, QName},
+ rabbit_misc:table_lookup(Headers, <<"x-first-death-queue">>)),
+ ?assertEqual({longstr, <<>>},
+ rabbit_misc:table_lookup(Headers, <<"x-first-death-exchange">>)),
+ %% Nack the message again so it gets dead lettered to the initial queue. x-first-death
+ %% headers should not change
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ ?assertEqual({longstr, <<"rejected">>},
+ rabbit_misc:table_lookup(Headers2, <<"x-first-death-reason">>)),
+ ?assertEqual({longstr, QName},
+ rabbit_misc:table_lookup(Headers2, <<"x-first-death-queue">>)),
+ ?assertEqual({longstr, <<>>},
+ rabbit_misc:table_lookup(Headers2, <<"x-first-death-exchange">>)).
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+%% Test helpers
+%%%%%%%%%%%%%%%%%%%%%%%%
+declare_dead_letter_queues(Ch, Config, QName, DLXQName) ->
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName, []).
+
+declare_dead_letter_queues(Ch, Config, QName, DLXQName, ExtraArgs) ->
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Declare DLX exchange
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+
+ %% Declare queue
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange},
+ {<<"x-dead-letter-routing-key">>, longstr, DLXQName}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args ++ ExtraArgs, durable = Durable}),
+
+ %% Declare and bind DLX queue
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}).
+
+publish(Ch, QName, Payloads) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload})
+ || Payload <- Payloads].
+
+publish(Ch, QName, Payloads, Headers) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName},
+ #amqp_msg{payload = Payload,
+ props = #'P_basic'{headers = Headers}})
+ || Payload <- Payloads].
+
+consume(Ch, QName, Payloads) ->
+ [begin
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = Payload}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ DTag
+ end || Payload <- Payloads].
+
+consume_empty(Ch, QName) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+sync_mirrors(QName, Config) ->
+ case ?config(is_mirrored, Config) of
+ true ->
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]);
+ _ -> ok
+ end.
diff --git a/deps/rabbit/test/definition_import_SUITE.erl b/deps/rabbit/test/definition_import_SUITE.erl
new file mode 100644
index 0000000000..ac0c18da99
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE.erl
@@ -0,0 +1,257 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(definition_import_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, boot_time_import},
+ {group, roundtrip},
+ {group, import_on_a_running_node}
+ ].
+
+groups() ->
+ [
+ {import_on_a_running_node, [], [
+ %% Note: to make it easier to see which case failed,
+ %% these are intentionally not folded into a single case.
+ %% If generation becomes an alternative worth considering for these tests,
+ %% we'll just add a case that drives PropEr.
+ import_case1,
+ import_case2,
+ import_case3,
+ import_case4,
+ import_case5,
+ import_case6,
+ import_case7,
+ import_case8,
+ import_case9,
+ import_case10,
+ import_case11,
+ import_case12,
+ import_case13
+ ]},
+ {boot_time_import, [], [
+ import_on_a_booting_node
+ ]},
+
+ {roundtrip, [], [
+ export_import_round_trip_case1,
+ export_import_round_trip_case2
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(boot_time_import = Group, Config) ->
+ CasePath = filename:join(?config(data_dir, Config), "case5.json"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ Config2 = rabbit_ct_helpers:merge_app_env(Config1,
+ {rabbit, [
+ {load_definitions, CasePath}
+ ]}),
+ rabbit_ct_helpers:run_setup_steps(Config2, rabbit_ct_broker_helpers:setup_steps());
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1, rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%%
+%% Tests
+%%
+
+import_case1(Config) -> import_file_case(Config, "case1").
+import_case2(Config) -> import_file_case(Config, "case2").
+import_case3(Config) -> import_file_case(Config, "case3").
+import_case4(Config) -> import_file_case(Config, "case4").
+import_case6(Config) -> import_file_case(Config, "case6").
+import_case7(Config) -> import_file_case(Config, "case7").
+import_case8(Config) -> import_file_case(Config, "case8").
+
+import_case9(Config) -> import_from_directory_case(Config, "case9").
+
+import_case10(Config) -> import_from_directory_case_fails(Config, "case10").
+
+import_case5(Config) ->
+ import_file_case(Config, "case5"),
+ ?assertEqual(rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_runtime_parameters, value_global,
+ [mqtt_port_to_vhost_mapping]),
+ %% expect a proplist, see rabbitmq/rabbitmq-management#528
+ [{<<"1883">>,<<"/">>},
+ {<<"1884">>,<<"vhost2">>}]).
+
+import_case11(Config) -> import_file_case(Config, "case11").
+import_case12(Config) -> import_invalid_file_case(Config, "failing_case12").
+
+import_case13(Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ import_file_case(Config, "case13"),
+ VHost = <<"/">>,
+ QueueName = <<"definitions.import.case13.qq.1">>,
+ QueueIsImported =
+ fun () ->
+ case queue_lookup(Config, VHost, QueueName) of
+ {ok, _} -> true;
+ _ -> false
+ end
+ end,
+ rabbit_ct_helpers:await_condition(QueueIsImported, 20000),
+ {ok, Q} = queue_lookup(Config, VHost, QueueName),
+
+ %% see rabbitmq/rabbitmq-server#2400, rabbitmq/rabbitmq-server#2426
+ ?assert(amqqueue:is_quorum(Q)),
+ ?assertEqual([{<<"x-max-length">>, long, 991},
+ {<<"x-queue-type">>, longstr, <<"quorum">>}],
+ amqqueue:get_arguments(Q));
+ Skip ->
+ Skip
+ end.
+
+export_import_round_trip_case1(Config) ->
+ %% case 6 has runtime parameters that do not depend on any plugins
+ import_file_case(Config, "case6"),
+ Defs = export(Config),
+ import_raw(Config, rabbit_json:encode(Defs)).
+
+export_import_round_trip_case2(Config) ->
+ import_file_case(Config, "case9", "case9a"),
+ Defs = export(Config),
+ import_parsed(Config, Defs).
+
+import_on_a_booting_node(Config) ->
+ %% see case5.json
+ VHost = <<"vhost2">>,
+ %% verify that vhost2 eventually starts
+ case rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, await_running_on_all_nodes, [VHost, 3000]) of
+ ok -> ok;
+ {error, timeout} -> ct:fail("virtual host ~p was not imported on boot", [VHost])
+ end.
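+
+%% For reference, the boot-time import exercised above corresponds to setting
+%% the rabbit application's load_definitions key, e.g. in the classic config
+%% format (the path below is illustrative); init_per_group/2 injects the same
+%% setting via rabbit_ct_helpers:merge_app_env/2:
+%%
+%% {rabbit, [{load_definitions, "/path/to/definitions.json"}]}.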
+
+%%
+%% Implementation
+%%
+
+import_file_case(Config, CaseName) ->
+ CasePath = filename:join([
+ ?config(data_dir, Config),
+ CaseName ++ ".json"
+ ]),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_import_case, [CasePath]),
+ ok.
+
+import_file_case(Config, Subdirectory, CaseName) ->
+ CasePath = filename:join([
+ ?config(data_dir, Config),
+ Subdirectory,
+ CaseName ++ ".json"
+ ]),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_import_case, [CasePath]),
+ ok.
+
+import_invalid_file_case(Config, CaseName) ->
+ CasePath = filename:join(?config(data_dir, Config), CaseName ++ ".json"),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_invalid_import_case, [CasePath]),
+ ok.
+
+import_from_directory_case(Config, CaseName) ->
+ import_from_directory_case_expect(Config, CaseName, ok).
+
+import_from_directory_case_fails(Config, CaseName) ->
+ import_from_directory_case_expect(Config, CaseName, error).
+
+import_from_directory_case_expect(Config, CaseName, Expected) ->
+ CasePath = filename:join(?config(data_dir, Config), CaseName),
+ ?assert(filelib:is_dir(CasePath)),
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_directory_import_case,
+ [CasePath, Expected]),
+ ok.
+
+import_raw(Config, Body) ->
+ case rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_definitions, import_raw, [Body]) of
+ ok -> ok;
+ {error, E} ->
+ ct:pal("Import of JSON definitions ~p failed: ~p~n", [Body, E]),
+ ct:fail({failure, Body, E})
+ end.
+
+import_parsed(Config, Body) ->
+ case rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_definitions, import_parsed, [Body]) of
+ ok -> ok;
+ {error, E} ->
+ ct:pal("Import of parsed definitions ~p failed: ~p~n", [Body, E]),
+ ct:fail({failure, Body, E})
+ end.
+
+export(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_export, []).
+
+run_export() ->
+ rabbit_definitions:all_definitions().
+
+run_directory_import_case(Path, Expected) ->
+ ct:pal("Will load definitions from files under ~p~n", [Path]),
+ Result = rabbit_definitions:maybe_load_definitions_from(true, Path),
+ case Expected of
+ ok ->
+ ok = Result;
+ error ->
+ ?assertMatch({error, {failed_to_import_definitions, _, _}}, Result)
+ end.
+
+run_import_case(Path) ->
+ {ok, Body} = file:read_file(Path),
+ ct:pal("Successfully loaded a definition to import from ~p~n", [Path]),
+ case rabbit_definitions:import_raw(Body) of
+ ok -> ok;
+ {error, E} ->
+ ct:pal("Import case ~p failed: ~p~n", [Path, E]),
+ ct:fail({failure, Path, E})
+ end.
+
+run_invalid_import_case(Path) ->
+ {ok, Body} = file:read_file(Path),
+ ct:pal("Successfully loaded a definition to import from ~p~n", [Path]),
+ case rabbit_definitions:import_raw(Body) of
+ ok ->
+ ct:pal("Expected import case ~p to fail~n", [Path]),
+ ct:fail({failure, Path});
+ {error, _E} -> ok
+ end.
+
+queue_lookup(Config, VHost, Name) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, lookup, [rabbit_misc:r(VHost, queue, Name)]).
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case1.json b/deps/rabbit/test/definition_import_SUITE_data/case1.json
new file mode 100644
index 0000000000..b0785a5214
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case1.json
@@ -0,0 +1,99 @@
+{
+ "rabbit_version": "3.6.9",
+ "users": [
+ {
+ "name": "project_admin",
+ "password_hash": "A0EX\/2hiwrIDKFS+nEqwbCGcVxwEkDBFF3mBfkNW53KFFk64",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "\/"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "project_admin",
+ "vhost": "\/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "policies": [
+ {
+ "vhost": "\/",
+ "name": "nd-ns",
+ "pattern": "^project-nd-ns-",
+ "apply-to": "queues",
+ "definition": {
+ "expires": 120000,
+ "max-length": 10000
+ },
+ "priority": 1
+ },
+ {
+ "vhost": "\/",
+ "name": "nd-s",
+ "pattern": "^project-nd-s-",
+ "apply-to": "queues",
+ "definition": {
+ "expires": 1800000,
+ "max-length": 50000
+ },
+ "priority": 1
+ },
+ {
+ "vhost": "\/",
+ "name": "d-ns",
+ "pattern": "^project-d-ns-",
+ "apply-to": "queues",
+ "definition": {
+ "ha-mode": "exactly",
+ "ha-params": 3,
+ "ha-sync-mode": "automatic",
+ "expires": 604800000,
+ "ha-sync-batch-size": 100,
+ "queue-mode": "lazy"
+ },
+ "priority": 1
+ },
+ {
+ "vhost": "\/",
+ "name": "d-s",
+ "pattern": "^project-d-s-",
+ "apply-to": "queues",
+ "definition": {
+ "ha-mode": "exactly",
+ "ha-params": 3,
+ "ha-sync-mode": "automatic",
+ "expires": 604800000,
+ "queue-master-locator": "min-masters",
+ "ha-sync-batch-size": 100,
+ "queue-mode": "lazy"
+ },
+ "priority": 1
+ }
+ ],
+ "queues": [
+
+ ],
+ "exchanges": [
+ {
+ "name": "project.topic.default",
+ "vhost": "\/",
+ "type": "topic",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {
+
+ }
+ }
+ ],
+ "bindings": [
+
+ ]
+} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case10/case10a.json b/deps/rabbit/test/definition_import_SUITE_data/case10/case10a.json
new file mode 100644
index 0000000000..1eec5ccb9e
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case10/case10a.json
@@ -0,0 +1,67 @@
+{
+ "rabbit_version": "3.7.13",
+ "users": [
+ {
+ "name": "bunny_reader",
+ "password_hash": "ExmGdjBTmQEPxcW2z+dsOuPvjFbTBiYQgMByzfpE/IIXplYG",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "langohr_testbed"
+ },
+ {
+ "name": "bunny_testbed"
+ },
+ {
+ "name": "/"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "bunny_reader",
+ "vhost": "bunny_testbed",
+ "configure": "^---$",
+ "write": "^---$",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [],
+ "parameters": [
+ {
+ "component": "vhost-limits",
+ "name": "limits",
+ "value": {
+ "max-connections": 14000
+ },
+ "vhost": "/"
+ }
+ ],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@localhost"
+ }
+ ],
+ "policies": [],
+ "queues": [
+ {
+ "name": "bunny.basic_consume0.1364356981103202",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.1364356981103202",
+ "vhost": "bunny_testbed",
+ "durable": true,
+ "auto_delete": true,
+ "arguments": {}
+ }
+ ],
+ "exchanges": [],
+ "bindings": []
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case10/case10b.json b/deps/rabbit/test/definition_import_SUITE_data/case10/case10b.json
new file mode 100644
index 0000000000..9eb48e341e
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case10/case10b.json
@@ -0,0 +1,595 @@
+{
+ "rabbit_version": "3.7.13",
+ "users": [
+ {
+ "name": "langohr",
+ "password_hash": "7p9PXlsYs92NlHSdNgPoDXmN77NqeGpzCTHpElq/wPS1eAEd",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ },
+ {
+ "name": "bunny_reader",
+ "password_hash": "ExmGdjBTmQEPxcW2z+dsOuPvjFbTBiYQgMByzfpE/IIXplYG",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ },
+ {
+ "name": "bunny_gem",
+ "password_hash": "8HH7uxmZS3FDldlYmHpFEE5+gWaeQaim8qpWIHkmNxuQK8xO",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ },
+ {
+ "name": "guest2",
+ "password_hash": "E04A7cvvsaDJBezc3Sc2jCnywe9oS4DX18qFe4dwkjIr26gf",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "monitoring"
+ },
+ {
+ "name": "guest",
+ "password_hash": "CPCbkNAHXgQ7vmrqwP9e7RWQsE8U2DqN7JA4ggS50c4LwDda",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "administrator"
+ },
+ {
+ "name": "temp-user",
+ "password_hash": "CfUQkDeOYDrPkACDCjoF5zySbsXPIoMgNfv7FWfEpVFGegnL",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "management"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "langohr_testbed"
+ },
+ {
+ "name": "bunny_testbed"
+ },
+ {
+ "name": "/"
+ },
+ {
+ "name": "vhost3"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "bunny_reader",
+ "vhost": "bunny_testbed",
+ "configure": "^---$",
+ "write": "^---$",
+ "read": ".*"
+ },
+ {
+ "user": "bunny_gem",
+ "vhost": "bunny_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "langohr",
+ "vhost": "langohr_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "bunny_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "langohr_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "vhost3",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "langohr",
+ "vhost": "/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "temp-user",
+ "vhost": "/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [],
+ "parameters": [
+ {
+ "value": {
+ "expires": 3600000,
+ "uri": "amqp://localhost:5673"
+ },
+ "vhost": "/",
+ "component": "federation-upstream",
+ "name": "up-hare"
+ },
+ {
+ "value": {
+ "max-connections": 2000
+ },
+ "vhost": "/",
+ "component": "vhost-limits",
+ "name": "limits"
+ }
+ ],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@localhost"
+ }
+ ],
+ "policies": [],
+ "queues": [
+ {
+ "name": "bunny.basic_consume0.7103611911099639",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.6091120557781405",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.8661861002262826",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.3682573609392056",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.14855593896585362",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.9534242141484872",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.9434723539955824",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.12235844522013617",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.8370997977912426",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.4548488370639835",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.2289868670635532",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.00797124769641977",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "amq.gen-xddEPq9wHSNZKQbPK8pi3A",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": false,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.5195700828676673",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.3071859764599716",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "return",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "q1",
+ "vhost": "/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {}
+ },
+ {
+ "name": "declareArgs-deliveries-dead-letter",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "test.rabbitmq-basic-nack",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.test.recovery.q1",
+ "vhost": "/",
+ "durable": true,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests2.queues.client-named.durable.non-exclusive.non-auto-deleted",
+ "vhost": "/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {}
+ },
+ {
+ "name": "test.tx.rollback",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "test-integration-declared-passive-queue",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "test.recover",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "amq.gen-7EZF7WjGIQFDoXexVF-e8w",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {
+ "x-message-ttl": 1500
+ }
+ },
+ {
+ "name": "test.integration.channel.error",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "confirm",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "test.rabbitmq-message-ttl",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {
+ "x-message-ttl": 100
+ }
+ },
+ {
+ "name": "declareWithTTL",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {
+ "x-message-ttl": 9000000
+ }
+ },
+ {
+ "name": "test.tx.commit",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "test.get-ok",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests2.queues.non-auto-deleted1",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "qv3",
+ "vhost": "vhost3",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {}
+ }
+ ],
+ "exchanges": [
+ {
+ "name": "bunny.tests.exchanges.fanout",
+ "vhost": "bunny_testbed",
+ "type": "fanout",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "declareArgs-dead-letter",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.topic5",
+ "vhost": "/",
+ "type": "topic",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.extensions.altexchanges.direct1",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {
+ "alternate-exchange": "langohr.extensions.altexchanges.fanout1"
+ }
+ },
+ {
+ "name": "langohr.tests.exchanges.fanout1",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.direct3",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.topic4",
+ "vhost": "/",
+ "type": "topic",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.test.recovery.fanout2",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.fanout3",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.direct4",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.topic2",
+ "vhost": "/",
+ "type": "topic",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "test-integration-declared-passive-exchange",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "test-channel-still-exists",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.topic1",
+ "vhost": "/",
+ "type": "topic",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.fanout2",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.direct1",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.direct2",
+ "vhost": "/",
+ "type": "direct",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.headers2",
+ "vhost": "/",
+ "type": "headers",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.topic3",
+ "vhost": "/",
+ "type": "topic",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.fanout4",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ }
+ ],
+ "bindings": [
+ {
+ "source": "amq.fanout",
+ "vhost": "/",
+ "destination": "langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted",
+ "destination_type": "queue",
+ "routing_key": "",
+ "arguments": {}
+ },
+ {
+ "source": "declareArgs-dead-letter",
+ "vhost": "/",
+ "destination": "declareArgs-deliveries-dead-letter",
+ "destination_type": "queue",
+ "routing_key": "#",
+ "arguments": {}
+ }
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case11.json b/deps/rabbit/test/definition_import_SUITE_data/case11.json
new file mode 100644
index 0000000000..13afdf5cb5
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case11.json
@@ -0,0 +1,24 @@
+{
+ "rabbit_version": "3.8.0+rc.1.5.g9148053",
+ "rabbitmq_version": "3.8.0+rc.1.5.g9148053",
+ "queues": [
+ {
+ "name": "amq.queuebar",
+ "vhost": "/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {}
+ }
+ ],
+ "exchanges": [
+ {
+ "name": "amq.foobar",
+ "vhost": "/",
+ "type": "direct",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ }
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case13.json b/deps/rabbit/test/definition_import_SUITE_data/case13.json
new file mode 100644
index 0000000000..726aab1e6c
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case13.json
@@ -0,0 +1,55 @@
+{
+ "bindings": [],
+ "exchanges": [],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@localhost"
+ }
+ ],
+ "parameters": [],
+ "permissions": [
+ {
+ "configure": ".*",
+ "read": ".*",
+ "user": "guest",
+ "vhost": "/",
+ "write": ".*"
+ }
+ ],
+ "policies": [],
+ "queues": [
+ {
+ "arguments": {
+ "x-max-length": 991,
+ "x-queue-type": "quorum"
+ },
+ "auto_delete": false,
+ "durable": true,
+ "name": "definitions.import.case13.qq.1",
+ "type": "quorum",
+ "vhost": "/"
+ }
+ ],
+ "rabbit_version": "3.8.6.gad0c0bd",
+ "rabbitmq_version": "3.8.6.gad0c0bd",
+ "topic_permissions": [],
+ "users": [
+ {
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "name": "guest",
+ "password_hash": "e8lL5PHYcbv3Pd53EUoTOMnVDmsLDgVJXqSQMT+mrO4LVIdW",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "limits": [],
+ "metadata": {
+ "description": "Default virtual host",
+ "tags": []
+ },
+ "name": "/"
+ }
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case2.json b/deps/rabbit/test/definition_import_SUITE_data/case2.json
new file mode 100644
index 0000000000..0f0a014681
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case2.json
@@ -0,0 +1,49 @@
+{
+ "rabbit_version": "3.7.0-rc.1",
+ "users": [
+ {
+ "name": "guest",
+ "password_hash": "A0EX\/2hiwrIDKFS+nEqwbCGcVxwEkDBFF3mBfkNW53KFFk64",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "\/"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "guest",
+ "vhost": "\/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [
+
+ ],
+ "parameters": [
+
+ ],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@mercurio"
+ }
+ ],
+ "policies": [
+
+ ],
+ "queues": [
+
+ ],
+ "exchanges": [
+
+ ],
+ "bindings": [
+
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case3.json b/deps/rabbit/test/definition_import_SUITE_data/case3.json
new file mode 100644
index 0000000000..963039f254
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case3.json
@@ -0,0 +1 @@
+{"rabbit_version":"3.7.0-alpha.381","users":[{"name":"admin","password_hash":"Edl2rJd/zLC187M1SKibRoTb6+xGkvkqoKWEq0kdNUbNLyLJ","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"administrator"}],"vhosts":[{"name":"/"}],"permissions":[{"user":"admin","vhost":"/","configure":".*","write":".*","read":".*"}],"topic_permissions":[],"parameters":[],"global_parameters":[{"name":"cluster_name","value":"rmq-gcp-37"}],"policies":[{"vhost":"/","name":"2-queue-replicas","pattern":".*","apply-to":"queues","definition":{"ha-mode":"exactly","ha-params":2,"ha-sync-mode":"automatic"},"priority":0}],"queues":[{"name":"wr-vary-load-lazy-persistent-1","vhost":"/","durable":true,"auto_delete":false,"arguments":{"x-queue-mode":"lazy"}},{"name":"wr-vary-load-lazy-transient-1","vhost":"/","durable":true,"auto_delete":false,"arguments":{"x-queue-mode":"lazy"}},{"name":"wr-vary-load-2","vhost":"/","durable":true,"auto_delete":false,"arguments":{"x-queue-mode":"default"}},{"name":"wr-vary-load-1","vhost":"/","durable":true,"auto_delete":false,"arguments":{"x-queue-mode":"default"}},{"name":"aliveness-test","vhost":"/","durable":false,"auto_delete":false,"arguments":{}}],"exchanges":[],"bindings":[{"source":"amq.direct","vhost":"/","destination":"wr-vary-load-2","destination_type":"queue","routing_key":"wr-vary-load-2","arguments":{}},{"source":"amq.direct","vhost":"/","destination":"wr-vary-load-lazy-persistent-1","destination_type":"queue","routing_key":"wr-vary-load-lazy-persistent-1","arguments":{}},{"source":"amq.direct","vhost":"/","destination":"wr-vary-load-lazy-transient-1","destination_type":"queue","routing_key":"wr-vary-load-lazy-transient-1","arguments":{}}]} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case4.json b/deps/rabbit/test/definition_import_SUITE_data/case4.json
new file mode 100644
index 0000000000..f5223ff3a2
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case4.json
@@ -0,0 +1,49 @@
+{
+ "bindings": [],
+ "exchanges": [],
+ "parameters": [],
+ "permissions": [
+ {
+ "configure": ".*",
+ "read": ".*",
+ "user": "guest",
+ "vhost": "/",
+ "write": ".*"
+ }
+ ],
+ "policies": [
+ {
+ "apply-to": "all",
+ "definition": {
+ "queue-master-locator": "client-local"
+ },
+ "name": "abc",
+ "pattern": "^abc\\.",
+ "priority": 0,
+ "vhost": "/"
+ }
+ ],
+ "queues": [
+ {
+ "arguments": {},
+ "auto_delete": false,
+ "durable": false,
+ "name": "abc.def",
+ "vhost": "/"
+ }
+ ],
+ "rabbit_version": "0.0.0",
+ "users": [
+ {
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "name": "guest",
+ "password_hash": "QM532K822VTbYBFbwSZEnT8jkH8TT0dPsUtja6vL0myfsrmk",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "/"
+ }
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case5.json b/deps/rabbit/test/definition_import_SUITE_data/case5.json
new file mode 100644
index 0000000000..607dfd3d1f
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case5.json
@@ -0,0 +1,63 @@
+{
+ "rabbit_version": "3.7.2",
+ "users": [
+ {
+ "name": "guest",
+ "password_hash": "PD4MQV8Ivcprh1\/yUS9x7jkpbXtWIZLTQ0tvnZPncpI6Ui0a",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "\/"
+ },
+ {
+ "name": "vhost2"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "guest",
+ "vhost": "\/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "vhost2",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [
+
+ ],
+ "parameters": [
+
+ ],
+ "global_parameters": [
+ {
+ "name": "mqtt_port_to_vhost_mapping",
+ "value": {
+ "1883": "\/",
+ "1884": "vhost2"
+ }
+ },
+ {
+ "name": "cluster_name",
+ "value": "rabbitmq@localhost"
+ }
+ ],
+ "policies": [
+ ],
+ "queues": [
+ ],
+ "exchanges": [
+ ],
+ "bindings": [
+
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case6.json b/deps/rabbit/test/definition_import_SUITE_data/case6.json
new file mode 100644
index 0000000000..c0debb7de1
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case6.json
@@ -0,0 +1,47 @@
+{
+ "rabbit_version": "3.7.3+10.g41ec73b",
+ "users": [
+ {
+ "name": "guest",
+ "password_hash": "J+UiUxNQ3I8uPn6Lo2obWcl93VgXgbw4R+xhl3L5zHwkRFZG",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "/"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "guest",
+ "vhost": "/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [],
+ "parameters": [
+ {
+ "value": {
+ "max-queues": 456,
+ "max-connections": 123
+ },
+ "vhost": "/",
+ "component": "vhost-limits",
+ "name": "limits"
+ }
+ ],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@localhost.localdomain"
+ }
+ ],
+ "policies": [],
+ "queues": [],
+ "exchanges": [],
+ "bindings": []
+} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case7.json b/deps/rabbit/test/definition_import_SUITE_data/case7.json
new file mode 100644
index 0000000000..7a8e0174ac
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case7.json
@@ -0,0 +1,398 @@
+
+
+{
+ "rabbit_version": "3.7.4",
+ "users": [
+ {
+ "name": "bunny_reader",
+ "password_hash": "rgJkcwpypdpIVhbLDj7CaCtFVg6Dyj3yQDcCbhyn29u49c88",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ },
+ {
+ "name": "bunny_gem",
+ "password_hash": "fHFOkIlJ8iohrhN4IQXIzIDrxsOfaekv97wA1W\/0N\/uxTWjE",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ },
+ {
+ "name": "guest",
+ "password_hash": "ujflQBzsAaAfbNSLAy4y2iG9mMpgATaH5oXQfPLkxOhE1yzH",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "bunny_testbed"
+ },
+ {
+ "name": "\/"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "bunny_reader",
+ "vhost": "bunny_testbed",
+ "configure": "^---$",
+ "write": "^---$",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "\/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "bunny_gem",
+ "vhost": "bunny_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "bunny_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [
+
+ ],
+ "parameters": [
+ {
+ "value": {
+ "pattern": "^apd\\\\.mce\\\\.estarchive.*$",
+ "definition": {
+ "max-length-bytes": 200000000
+ },
+ "priority": 0,
+ "apply-to": "queues"
+ },
+ "vhost": "\/",
+ "component": "operator_policy",
+ "name": "apd-mce-estarchive"
+ }
+ ],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@warp10"
+ }
+ ],
+ "policies": [
+
+ ],
+ "queues": [
+ {
+ "name": "test",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "reply",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "ack-test",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "nack-test",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "redelivered-test",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "unsub02",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "known3",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "ack-test-tx",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "stomp-subscription-As-CQ37wutHLc9H0PmjIPw",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "unsub01",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "test-receipt-tx",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "unsub03",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "test2",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "known",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "unsub04",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "stomp-subscription-j7FLeUn7ehTatYVNiBy6UA",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "ir",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "nack-multi",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "test-receipt",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "reliability",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "duplicate-consumer-tag-test2",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "test-multi",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "test3",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "stomp-subscription-tMbeqL30tjlgaXMmaFM6Ew",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "nack-test-no-requeue",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "ack-test-individual",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "known2",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "custom-header",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "stomp-subscription-eWSXV2ty1R7VqfsnULKEkA",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ }
+ ],
+ "exchanges": [
+
+ ],
+ "bindings": [
+ {
+ "source": "amq.topic",
+ "vhost": "\/",
+ "destination": "stomp-subscription-tMbeqL30tjlgaXMmaFM6Ew",
+ "destination_type": "queue",
+ "routing_key": "durable",
+ "arguments": {
+
+ }
+ },
+ {
+ "source": "amq.topic",
+ "vhost": "\/",
+ "destination": "stomp-subscription-eWSXV2ty1R7VqfsnULKEkA",
+ "destination_type": "queue",
+ "routing_key": "durable-separate",
+ "arguments": {
+
+ }
+ },
+ {
+ "source": "amq.topic",
+ "vhost": "\/",
+ "destination": "stomp-subscription-j7FLeUn7ehTatYVNiBy6UA",
+ "destination_type": "queue",
+ "routing_key": "durable-separate",
+ "arguments": {
+
+ }
+ },
+ {
+ "source": "amq.topic",
+ "vhost": "\/",
+ "destination": "stomp-subscription-As-CQ37wutHLc9H0PmjIPw",
+ "destination_type": "queue",
+ "routing_key": "durable-shared",
+ "arguments": {
+
+ }
+ }
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case8.json b/deps/rabbit/test/definition_import_SUITE_data/case8.json
new file mode 100644
index 0000000000..1deb55b45c
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case8.json
@@ -0,0 +1,17 @@
+{
+ "rabbit_version": "3.8.0+beta.1.6.g0c7c7d9",
+ "parameters": [
+ {
+ "value": {
+ "max-connections": 6767
+ },
+ "vhost": "/",
+ "component": "vhost-limits",
+ "name": "limits"
+ }
+ ],
+ "policies": [],
+ "queues": [],
+ "exchanges": [],
+ "bindings": []
+} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case9/case9a.json b/deps/rabbit/test/definition_import_SUITE_data/case9/case9a.json
new file mode 100644
index 0000000000..2e7a77962d
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case9/case9a.json
@@ -0,0 +1 @@
+{"rabbit_version":"3.7.13","users":[{"name":"langohr","password_hash":"7p9PXlsYs92NlHSdNgPoDXmN77NqeGpzCTHpElq/wPS1eAEd","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"bunny_reader","password_hash":"ExmGdjBTmQEPxcW2z+dsOuPvjFbTBiYQgMByzfpE/IIXplYG","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"bunny_gem","password_hash":"8HH7uxmZS3FDldlYmHpFEE5+gWaeQaim8qpWIHkmNxuQK8xO","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"guest","password_hash":"CPCbkNAHXgQ7vmrqwP9e7RWQsE8U2DqN7JA4ggS50c4LwDda","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"administrator"},{"name":"temp-user","password_hash":"CfUQkDeOYDrPkACDCjoF5zySbsXPIoMgNfv7FWfEpVFGegnL","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"management"}],"vhosts":[{"name":"langohr_testbed"},{"name":"bunny_testbed"},{"name":"/"}],"permissions":[{"user":"bunny_reader","vhost":"bunny_testbed","configure":"^---$","write":"^---$","read":".*"},{"user":"bunny_gem","vhost":"bunny_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"/","configure":".*","write":".*","read":".*"},{"user":"langohr","vhost":"langohr_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"bunny_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"langohr_testbed","configure":".*","write":".*","read":".*"},{"user":"langohr","vhost":"/","configure":".*","write":".*","read":".*"},{"user":"temp-user","vhost":"/","configure":".*","write":".*","read":".*"}],"topic_permissions":[],"parameters":[],"global_parameters":[{"name":"cluster_name","value":"rabbit@localhost"}],"policies":[],"queues":[{"name":"bunny.basic_consume0.1364356981103202","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"return","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"q1","vhost":"/","durable":true,"auto_delete":false,"arguments":{}},{"name":"declareArgs-deliveries-dead-letter","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.rabbitmq-basic-nack","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"langohr.test.recovery.q1","vhost":"/","durable":true,"auto_delete":true,"arguments":{}},{"name":"langohr.tests2.queues.client-named.durable.non-exclusive.non-auto-deleted","vhost":"/","durable":true,"auto_delete":false,"arguments":{}},{"name":"test.tx.rollback","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test-integration-declared-passive-queue","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.recover","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"amq.gen-7EZF7WjGIQFDoXexVF-e8w","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":1500}},{"name":"test.integration.channel.error","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"confirm","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.rabbitmq-message-ttl","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":100}},{"name":"declareWithTTL","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":9000000}},{"name":"test.tx.commit","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.get-ok","vhost":"/","durable":false,"auto_delete":tr
ue,"arguments":{}},{"name":"langohr.tests2.queues.non-auto-deleted1","vhost":"/","durable":false,"auto_delete":true,"arguments":{}}],"exchanges":[{"name":"declareArgs-dead-letter","vhost":"/","type":"fanout","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic5","vhost":"/","type":"topic","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.extensions.altexchanges.direct1","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{"alternate-exchange":"langohr.extensions.altexchanges.fanout1"}},{"name":"langohr.tests.exchanges.fanout1","vhost":"/","type":"fanout","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct3","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic4","vhost":"/","type":"topic","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout3","vhost":"/","type":"fanout","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct4","vhost":"/","type":"direct","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic2","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"test-integration-declared-passive-exchange","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"test-channel-still-exists","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic1","vhost":"/","type":"topic","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout2","vhost":"/","type":"fanout","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct1","vhost":"/","type":"direct","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct2","vhost":"/","type":"direct","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.headers2","vhost":"/","type":"headers","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic3","vhost":"/","type":"topic","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.test.recovery.fanout1","vhost":"/","type":"fanout","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout4","vhost":"/","type":"fanout","durable":false,"auto_delete":false,"internal":false,"arguments":{}}],"bindings":[{"source":"amq.fanout","vhost":"/","destination":"langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted","destination_type":"queue","routing_key":"","arguments":{}},{"source":"declareArgs-dead-letter","vhost":"/","destination":"declareArgs-deliveries-dead-letter","destination_type":"queue","routing_key":"#","arguments":{}}]} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case9/case9b.json b/deps/rabbit/test/definition_import_SUITE_data/case9/case9b.json
new file mode 100644
index 0000000000..7cadd58b17
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case9/case9b.json
@@ -0,0 +1 @@
+{"rabbit_version":"3.7.13","users":[{"name":"langohr","password_hash":"7p9PXlsYs92NlHSdNgPoDXmN77NqeGpzCTHpElq/wPS1eAEd","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"bunny_reader","password_hash":"ExmGdjBTmQEPxcW2z+dsOuPvjFbTBiYQgMByzfpE/IIXplYG","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"bunny_gem","password_hash":"8HH7uxmZS3FDldlYmHpFEE5+gWaeQaim8qpWIHkmNxuQK8xO","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"guest2","password_hash":"E04A7cvvsaDJBezc3Sc2jCnywe9oS4DX18qFe4dwkjIr26gf","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"monitoring"},{"name":"guest","password_hash":"CPCbkNAHXgQ7vmrqwP9e7RWQsE8U2DqN7JA4ggS50c4LwDda","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"administrator"},{"name":"temp-user","password_hash":"CfUQkDeOYDrPkACDCjoF5zySbsXPIoMgNfv7FWfEpVFGegnL","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"management"}],"vhosts":[{"name":"langohr_testbed"},{"name":"bunny_testbed"},{"name":"/"},{"name":"vhost3"}],"permissions":[{"user":"bunny_reader","vhost":"bunny_testbed","configure":"^---$","write":"^---$","read":".*"},{"user":"bunny_gem","vhost":"bunny_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"/","configure":".*","write":".*","read":".*"},{"user":"langohr","vhost":"langohr_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"bunny_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"langohr_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"vhost3","configure":".*","write":".*","read":".*"},{"user":"langohr","vhost":"/","configure":".*","write":".*","read":".*"},{"user":"temp-user","vhost":"/","configure":".*","write":".*","read":".*"}],"topic_permissions":[],"parameters":[{"value":{"max-connections":2000},"vhost":"/","component":"vhost-limits","name":"limits"}],"global_parameters":[{"name":"cluster_name","value":"rabbit@localhost"}],"policies":[],"queues":[{"name":"bunny.basic_consume0.7103611911099639","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.6091120557781405","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.8661861002262826","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.3682573609392056","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.14855593896585362","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.9534242141484872","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.9434723539955824","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.12235844522013617","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.8370997977912426","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.4548488370639835","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.2289868670635532","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.00797124769641977","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"amq.gen-xddEPq9wHSNZKQbPK8pi
3A","vhost":"bunny_testbed","durable":false,"auto_delete":false,"arguments":{}},{"name":"bunny.basic_consume0.5195700828676673","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.3071859764599716","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"return","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"q1","vhost":"/","durable":true,"auto_delete":false,"arguments":{}},{"name":"declareArgs-deliveries-dead-letter","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.rabbitmq-basic-nack","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"langohr.test.recovery.q1","vhost":"/","durable":true,"auto_delete":true,"arguments":{}},{"name":"langohr.tests2.queues.client-named.durable.non-exclusive.non-auto-deleted","vhost":"/","durable":true,"auto_delete":false,"arguments":{}},{"name":"test.tx.rollback","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test-integration-declared-passive-queue","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.recover","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"amq.gen-7EZF7WjGIQFDoXexVF-e8w","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":1500}},{"name":"test.integration.channel.error","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"confirm","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.rabbitmq-message-ttl","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":100}},{"name":"declareWithTTL","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":9000000}},{"name":"test.tx.commit","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.get-ok","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"langohr.tests2.queues.non-auto-deleted1","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"qv3","vhost":"vhost3","durable":true,"auto_delete":false,"arguments":{}}],"exchanges":[{"name":"bunny.tests.exchanges.fanout","vhost":"bunny_testbed","type":"fanout","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"declareArgs-dead-letter","vhost":"/","type":"fanout","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic5","vhost":"/","type":"topic","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.extensions.altexchanges.direct1","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{"alternate-exchange":"langohr.extensions.altexchanges.fanout1"}},{"name":"langohr.tests.exchanges.fanout1","vhost":"/","type":"fanout","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct3","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic4","vhost":"/","type":"topic","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.test.recovery.fanout2","vhost":"/","type":"fanout","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout3","vhost":"/","type":"fanout","durable":false,"auto_dele
te":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct4","vhost":"/","type":"direct","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic2","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"test-integration-declared-passive-exchange","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"test-channel-still-exists","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic1","vhost":"/","type":"topic","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout2","vhost":"/","type":"fanout","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct1","vhost":"/","type":"direct","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct2","vhost":"/","type":"direct","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.headers2","vhost":"/","type":"headers","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic3","vhost":"/","type":"topic","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout4","vhost":"/","type":"fanout","durable":false,"auto_delete":false,"internal":false,"arguments":{}}],"bindings":[{"source":"amq.fanout","vhost":"/","destination":"langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted","destination_type":"queue","routing_key":"","arguments":{}},{"source":"declareArgs-dead-letter","vhost":"/","destination":"declareArgs-deliveries-dead-letter","destination_type":"queue","routing_key":"#","arguments":{}}]} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/failing_case12.json b/deps/rabbit/test/definition_import_SUITE_data/failing_case12.json
new file mode 100644
index 0000000000..6ce0366a70
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/failing_case12.json
@@ -0,0 +1,24 @@
+{
+ "rabbit_version": "3.8.0+rc.1.5.g9148053",
+ "rabbitmq_version": "3.8.0+rc.1.5.g9148053",
+ "queues": [
+ {
+ "name": "amq.queuebar",
+ "vhost": "/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {}
+ }
+ ],
+ "exchanges": [
+ {
+ "name": "invalid_type",
+ "vhost": "/",
+ "type": "definitly not direct",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ }
+ ]
+}
diff --git a/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl b/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl
new file mode 100644
index 0000000000..820e13efa0
--- /dev/null
+++ b/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl
@@ -0,0 +1,111 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(disconnect_detected_during_alarm_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, disconnect_detected_during_alarm}
+ ].
+
+groups() ->
+ [
+ %% Test previously executed with the multi-node target.
+ {disconnect_detected_during_alarm, [], [
+ disconnect_detected_during_alarm %% Trigger alarm.
+ ]}
+ ].
+
+group(_) ->
+ [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+end_per_group1(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% ---------------------------------------------------------------------------
+%% Testcase
+%% ---------------------------------------------------------------------------
+
+disconnect_detected_during_alarm(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ %% Set a low memory high watermark.
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, A,
+ ["set_vm_memory_high_watermark", "0.000000001"]),
+
+ %% Open a connection and a channel.
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, A, tcp_port_amqp),
+ Heartbeat = 1,
+ {ok, Conn} = amqp_connection:start(
+ #amqp_params_network{port = Port,
+ heartbeat = Heartbeat}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ amqp_connection:register_blocked_handler(Conn, self()),
+ Publish = #'basic.publish'{routing_key = <<"nowhere-to-go">>},
+ amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}),
+ receive
+ % Check that connection was indeed blocked
+ #'connection.blocked'{} -> ok
+ after
+ 1000 -> exit(connection_was_not_blocked)
+ end,
+
+ %% The connection is blocked; now forcefully kill it
+ {'EXIT', _} = (catch amqp_connection:close(Conn, 10)),
+
+ ListConnections =
+ fun() ->
+ rpc:call(A, rabbit_networking, connection_info_all, [])
+ end,
+
+ %% We've already disconnected, but the blocked connection should still linger on.
+ [SingleConn] = ListConnections(),
+ blocked = rabbit_misc:pget(state, SingleConn),
+
+ %% It should definitely go away after 2 heartbeat intervals.
+ timer:sleep(round(2.5 * 1000 * Heartbeat)),
+ [] = ListConnections(),
+
+ passed.
diff --git a/deps/rabbit/test/dummy_event_receiver.erl b/deps/rabbit/test/dummy_event_receiver.erl
new file mode 100644
index 0000000000..3d417b601b
--- /dev/null
+++ b/deps/rabbit/test/dummy_event_receiver.erl
@@ -0,0 +1,49 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(dummy_event_receiver).
+
+-export([start/3, stop/0]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-include("rabbit.hrl").
+
+start(Pid, Nodes, Types) ->
+ Oks = [ok || _ <- Nodes],
+ {Oks, _} = rpc:multicall(Nodes, gen_event, add_handler,
+ [rabbit_event, ?MODULE, [Pid, Types]]).
+
+stop() ->
+ gen_event:delete_handler(rabbit_event, ?MODULE, []).
+
+%%----------------------------------------------------------------------------
+
+init([Pid, Types]) ->
+ {ok, {Pid, Types}}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+handle_event(Event = #event{type = Type}, State = {Pid, Types}) ->
+ case lists:member(Type, Types) of
+ true -> Pid ! Event;
+ false -> ok
+ end,
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
diff --git a/deps/rabbit/test/dummy_interceptor.erl b/deps/rabbit/test/dummy_interceptor.erl
new file mode 100644
index 0000000000..6d510a3073
--- /dev/null
+++ b/deps/rabbit/test/dummy_interceptor.erl
@@ -0,0 +1,26 @@
+-module(dummy_interceptor).
+
+-behaviour(rabbit_channel_interceptor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+
+-compile(export_all).
+
+init(_Ch) ->
+ undefined.
+
+description() ->
+ [{description,
+ <<"Empties payload on publish">>}].
+
+intercept(#'basic.publish'{} = Method, Content, _IState) ->
+ Content2 = Content#content{payload_fragments_rev = []},
+ {Method, Content2};
+
+intercept(Method, Content, _VHost) ->
+ {Method, Content}.
+
+applies_to() ->
+ ['basic.publish'].
diff --git a/deps/rabbit/test/dummy_runtime_parameters.erl b/deps/rabbit/test/dummy_runtime_parameters.erl
new file mode 100644
index 0000000000..01d0b74f95
--- /dev/null
+++ b/deps/rabbit/test/dummy_runtime_parameters.erl
@@ -0,0 +1,63 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(dummy_runtime_parameters).
+-behaviour(rabbit_runtime_parameter).
+-behaviour(rabbit_policy_validator).
+
+-include("rabbit.hrl").
+
+-export([validate/5, notify/5, notify_clear/4]).
+-export([register/0, unregister/0]).
+-export([validate_policy/1]).
+-export([register_policy_validator/0, unregister_policy_validator/0]).
+
+%----------------------------------------------------------------------------
+
+register() ->
+ rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE).
+
+unregister() ->
+ rabbit_registry:unregister(runtime_parameter, <<"test">>).
+
+validate(_, <<"test">>, <<"good">>, _Term, _User) -> ok;
+validate(_, <<"test">>, <<"maybe">>, <<"good">>, _User) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, none) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, User) ->
+ case lists:member(administrator, User#user.tags) of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+validate(_, <<"test">>, _, _, _) -> {error, "meh", []}.
+
+notify(_, _, _, _, _) -> ok.
+notify_clear(_, _, _, _) -> ok.
+
+%----------------------------------------------------------------------------
+
+register_policy_validator() ->
+ rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE),
+ rabbit_registry:register(policy_validator, <<"testpos">>, ?MODULE).
+
+unregister_policy_validator() ->
+ rabbit_registry:unregister(policy_validator, <<"testeven">>),
+ rabbit_registry:unregister(policy_validator, <<"testpos">>).
+
+validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) ->
+ case length(Terms) rem 2 =:= 0 of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+
+validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) ->
+ case lists:all(fun (N) -> is_integer(N) andalso N > 0 end, Terms) of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+
+validate_policy(_) ->
+ {error, "meh", []}.
diff --git a/deps/rabbit/test/dummy_supervisor2.erl b/deps/rabbit/test/dummy_supervisor2.erl
new file mode 100644
index 0000000000..354b3a0854
--- /dev/null
+++ b/deps/rabbit/test/dummy_supervisor2.erl
@@ -0,0 +1,32 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(dummy_supervisor2).
+
+-behaviour(supervisor2).
+
+-export([
+ start_link/0,
+ init/1
+ ]).
+
+start_link() ->
+ Pid = spawn_link(fun () ->
+ process_flag(trap_exit, true),
+ receive stop -> ok end
+ end),
+ {ok, Pid}.
+
+init([Timeout]) ->
+ {ok, {{one_for_one, 0, 1},
+ [{test_sup, {supervisor2, start_link,
+ [{local, ?MODULE}, ?MODULE, []]},
+ transient, Timeout, supervisor, [?MODULE]}]}};
+init([]) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{test_worker, {?MODULE, start_link, []},
+ temporary, 1000, worker, [?MODULE]}]}}.
diff --git a/deps/rabbit/test/dynamic_ha_SUITE.erl b/deps/rabbit/test/dynamic_ha_SUITE.erl
new file mode 100644
index 0000000000..85969135b6
--- /dev/null
+++ b/deps/rabbit/test/dynamic_ha_SUITE.erl
@@ -0,0 +1,1034 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(dynamic_ha_SUITE).
+
+%% rabbit_tests:test_dynamic_mirroring() is a unit test which should
+%% test the logic of what all the policies decide to do, so we don't
+%% need to exhaustively test that here. What we need to test is that:
+%%
+%% * Going from non-mirrored to mirrored works and vice versa
+%% * Changing policy can add / remove mirrors and change the master
+%% * Adding a node will create a new mirror when there are not enough nodes
+%% for the policy
+%% * Removing a node will not create a new mirror even if the policy
+%% logic wants it (since this gives us a good way to lose messages
+%% on cluster shutdown, by repeated failover to new nodes)
+%%
+%% The first two are change_policy, the last two are change_cluster
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"ha.test">>).
+-define(POLICY, <<"^ha.test$">>). %% " emacs
+-define(VHOST, <<"/">>).
+
+all() ->
+ [
+ {group, unclustered},
+ {group, clustered}
+ ].
+
+groups() ->
+ [
+ {unclustered, [], [
+ {cluster_size_5, [], [
+ change_cluster
+ ]}
+ ]},
+ {clustered, [], [
+ {cluster_size_2, [], [
+ vhost_deletion,
+ force_delete_if_no_master,
+ promote_on_shutdown,
+ promote_on_failure,
+ follower_recovers_after_vhost_failure,
+ follower_recovers_after_vhost_down_and_up,
+ master_migrates_on_vhost_down,
+ follower_recovers_after_vhost_down_and_master_migrated,
+ queue_survive_adding_dead_vhost_mirror,
+ dynamic_mirroring
+ ]},
+ {cluster_size_3, [], [
+ change_policy,
+ rapid_change,
+ nodes_policy_should_pick_master_from_its_params,
+ promote_follower_after_standalone_restart,
+ queue_survive_adding_dead_vhost_mirror,
+ rebalance_all,
+ rebalance_exactly,
+ rebalance_nodes,
+ rebalance_multiple_blocked
+ ]}
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(unclustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(clustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]);
+init_per_group(cluster_size_5, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 5}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+dynamic_mirroring(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, dynamic_mirroring1, [Config]).
+
+dynamic_mirroring1(_Config) ->
+ %% Just unit tests of the node selection logic, see multi node
+ %% tests for the rest...
+ Test = fun ({NewM, NewSs, ExtraSs}, Policy, Params,
+ {MNode, SNodes, SSNodes}, All) ->
+ {ok, M} = rabbit_mirror_queue_misc:module(Policy),
+ {NewM, NewSs0} = M:suggested_queue_nodes(
+ Params, MNode, SNodes, SSNodes, All),
+ NewSs1 = lists:sort(NewSs0),
+ case dm_list_match(NewSs, NewSs1, ExtraSs) of
+ ok -> ok;
+ error -> exit({no_match, NewSs, NewSs1, ExtraSs})
+ end
+ end,
+
+ Test({a,[b,c],0},<<"all">>,'_',{a,[], []}, [a,b,c]),
+ Test({a,[b,c],0},<<"all">>,'_',{a,[b,c],[b,c]},[a,b,c]),
+ Test({a,[b,c],0},<<"all">>,'_',{a,[d], [d]}, [a,b,c]),
+
+ N = fun (Atoms) -> [list_to_binary(atom_to_list(A)) || A <- Atoms] end,
+
+ %% Add a node
+ Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[b],[b]},[a,b,c,d]),
+ Test({b,[a,c],0},<<"nodes">>,N([a,b,c]),{b,[a],[a]},[a,b,c,d]),
+ %% Add two nodes and drop one
+ Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[d],[d]},[a,b,c,d]),
+ %% Don't try to include nodes that are not running
+ Test({a,[b], 0},<<"nodes">>,N([a,b,f]),{a,[b],[b]},[a,b,c,d]),
+ %% If we can't find any of the nodes listed then just keep the master
+ Test({a,[], 0},<<"nodes">>,N([f,g,h]),{a,[b],[b]},[a,b,c,d]),
+ %% And once that's happened, still keep the master even when not listed,
+ %% if nothing is synced
+ Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[], []}, [a,b,c,d]),
+ Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[b],[]}, [a,b,c,d]),
+ %% But if something is synced we can lose the master - but make
+ %% sure we pick the new master from the nodes which are synced!
+ Test({b,[c], 0},<<"nodes">>,N([b,c]), {a,[b],[b]},[a,b,c,d]),
+ Test({b,[c], 0},<<"nodes">>,N([c,b]), {a,[b],[b]},[a,b,c,d]),
+
+ Test({a,[], 1},<<"exactly">>,2,{a,[], []}, [a,b,c,d]),
+ Test({a,[], 2},<<"exactly">>,3,{a,[], []}, [a,b,c,d]),
+ Test({a,[c], 0},<<"exactly">>,2,{a,[c], [c]}, [a,b,c,d]),
+ Test({a,[c], 1},<<"exactly">>,3,{a,[c], [c]}, [a,b,c,d]),
+ Test({a,[c], 0},<<"exactly">>,2,{a,[c,d],[c,d]},[a,b,c,d]),
+ Test({a,[c,d],0},<<"exactly">>,3,{a,[c,d],[c,d]},[a,b,c,d]),
+
+ passed.
+
+%% Does the first list match the second where the second is required
+%% to have exactly Extra superfluous items?
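+%% For example, dm_list_match([b,c], [a,b,c], 1) returns ok (exactly one
+%% extra element, 'a', is tolerated), whereas dm_list_match([b,c], [a,b,c], 0)
+%% returns error.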
+dm_list_match([], [], 0) -> ok;
+dm_list_match(_, [], _Extra) -> error;
+dm_list_match([H|T1], [H |T2], Extra) -> dm_list_match(T1, T2, Extra);
+dm_list_match(L1, [_H|T2], Extra) -> dm_list_match(L1, T2, Extra - 1).
+
+change_policy(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ %% When we first declare a queue with no policy, it's not HA.
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}),
+ timer:sleep(200),
+ assert_followers(A, ?QNAME, {A, ''}),
+
+ %% Give it policy "all", it becomes HA and gets all mirrors
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, <<"all">>),
+ assert_followers(A, ?QNAME, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+
+ %% Give it policy "nodes", it gets specific mirrors
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(A),
+ rabbit_misc:atom_to_binary(B)]}),
+ assert_followers(A, ?QNAME, {A, [B]}, [{A, [B, C]}]),
+
+ %% Now explicitly change the mirrors
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(A),
+ rabbit_misc:atom_to_binary(C)]}),
+ assert_followers(A, ?QNAME, {A, [C]}, [{A, [B, C]}]),
+
+ %% Clear the policy, and we go back to non-mirrored
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY),
+ assert_followers(A, ?QNAME, {A, ''}),
+
+ %% Test switching "away" from an unmirrored node
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(B),
+ rabbit_misc:atom_to_binary(C)]}),
+ assert_followers(A, ?QNAME, {B, [C]}, [{A, []}, {A, [B]}, {A, [C]}, {A, [B, C]}]),
+
+ ok.
+
+change_cluster(Config) ->
+ [A, B, C, D, E] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:cluster_nodes(Config, [A, B, C]),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}),
+ assert_followers(A, ?QNAME, {A, ''}),
+
+ %% Give it policy exactly 4, it should mirror to all 3 nodes
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, {<<"exactly">>, 4}),
+ assert_followers(A, ?QNAME, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+
+ %% Add D and E, D or E joins in
+ rabbit_ct_broker_helpers:cluster_nodes(Config, [A, D, E]),
+ assert_followers(A, ?QNAME, [{A, [B, C, D]}, {A, [B, C, E]}], [{A, [B, C]}]),
+
+ %% Remove one, the other joins in
+ rabbit_ct_broker_helpers:stop_node(Config, D),
+ assert_followers(A, ?QNAME, [{A, [B, C, D]}, {A, [B, C, E]}], [{A, [B, C]}]),
+
+ ok.
+
+rapid_change(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ {_Pid, MRef} = spawn_monitor(
+ fun() ->
+ [rapid_amqp_ops(ACh, I) || I <- lists:seq(1, 100)]
+ end),
+ rapid_loop(Config, A, MRef),
+ ok.
+
+rapid_amqp_ops(Ch, I) ->
+ Payload = list_to_binary(integer_to_list(I)),
+ amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}),
+ amqp_channel:cast(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = ?QNAME},
+ #amqp_msg{payload = Payload}),
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = ?QNAME,
+ no_ack = true}, self()),
+ receive #'basic.consume_ok'{} -> ok
+ end,
+ receive {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+ ok
+ end,
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}).
+
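+%% Keep toggling the HA policy while the AMQP operations process is still
+%% running; return once that process exits normally, fail if it dies with
+%% any other reason.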
+rapid_loop(Config, Node, MRef) ->
+ receive
+ {'DOWN', MRef, process, _Pid, normal} ->
+ ok;
+ {'DOWN', MRef, process, _Pid, Reason} ->
+ exit({amqp_ops_died, Reason})
+ after 0 ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, Node, ?POLICY,
+ <<"all">>),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, Node, ?POLICY),
+ rapid_loop(Config, Node, MRef)
+ end.
+
+queue_survive_adding_dead_vhost_mirror(Config) ->
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, 1, <<"/">>),
+ NodeA = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, NodeA),
+ QName = <<"queue_survive_adding_dead_vhost_mirror-q-1">>,
+ amqp_channel:call(ChA, #'queue.declare'{queue = QName}),
+ Q = find_queue(QName, NodeA),
+ Pid = proplists:get_value(pid, Q),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ %% Queue should not fail
+ Q1 = find_queue(QName, NodeA),
+ Pid = proplists:get_value(pid, Q1).
+
+%% Vhost deletion needs to successfully tear down policies and queues
+%% with policies. At least smoke-test that it doesn't blow up.
+vhost_deletion(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ amqp_channel:call(ACh, #'queue.declare'{queue = <<"vhost_deletion-q">>}),
+ ok = rpc:call(A, rabbit_vhost, delete, [<<"/">>, <<"acting-user">>]),
+ ok.
+
+force_delete_if_no_master(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>,
+ <<"all">>),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ [begin
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q,
+ durable = true}),
+ rabbit_ct_client_helpers:publish(ACh, Q, 10)
+ end || Q <- [<<"ha.nopromote.test1">>, <<"ha.nopromote.test2">>]],
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, A),
+
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.nopromote.test1">>,
+ durable = true})),
+
+ BCh1 = rabbit_ct_client_helpers:open_channel(Config, B),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh1, #'queue.declare'{queue = <<"ha.nopromote.test2">>,
+ durable = true})),
+ BCh2 = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.delete_ok'{} =
+ amqp_channel:call(BCh2, #'queue.delete'{queue = <<"ha.nopromote.test1">>}),
+ %% Delete with if_empty will fail, since we don't know if the queue is empty
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(BCh2, #'queue.delete'{queue = <<"ha.nopromote.test2">>,
+ if_empty = true})),
+ BCh3 = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.delete_ok'{} =
+ amqp_channel:call(BCh3, #'queue.delete'{queue = <<"ha.nopromote.test2">>}),
+ ok.
+
+promote_on_failure(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.promote">>,
+ <<"all">>, [{<<"ha-promote-on-failure">>, <<"always">>}]),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>,
+ <<"all">>, [{<<"ha-promote-on-failure">>, <<"when-synced">>}]),
+
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ [begin
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q,
+ durable = true}),
+ rabbit_ct_client_helpers:publish(ACh, Q, 10)
+ end || Q <- [<<"ha.promote.test">>, <<"ha.nopromote.test">>]],
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+ ok = rabbit_ct_broker_helpers:kill_node(Config, A),
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.declare_ok'{message_count = 0} =
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.promote.test">>,
+ durable = true}),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.nopromote.test">>,
+ durable = true})),
+ ok = rabbit_ct_broker_helpers:start_node(Config, A),
+ ACh2 = rabbit_ct_client_helpers:open_channel(Config, A),
+ #'queue.declare_ok'{message_count = 10} =
+ amqp_channel:call(
+ ACh2, #'queue.declare'{queue = <<"ha.nopromote.test">>,
+ durable = true}),
+ ok.
+
+promote_on_shutdown(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.promote">>,
+ <<"all">>, [{<<"ha-promote-on-shutdown">>, <<"always">>}]),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>,
+ <<"all">>),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromoteonfailure">>,
+ <<"all">>, [{<<"ha-promote-on-failure">>, <<"when-synced">>},
+ {<<"ha-promote-on-shutdown">>, <<"always">>}]),
+
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ [begin
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q,
+ durable = true}),
+ rabbit_ct_client_helpers:publish(ACh, Q, 10)
+ end || Q <- [<<"ha.promote.test">>,
+ <<"ha.nopromote.test">>,
+ <<"ha.nopromoteonfailure.test">>]],
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, A),
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ BCh1 = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.declare_ok'{message_count = 0} =
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.promote.test">>,
+ durable = true}),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.nopromote.test">>,
+ durable = true})),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh1, #'queue.declare'{queue = <<"ha.nopromoteonfailure.test">>,
+ durable = true})),
+ ok = rabbit_ct_broker_helpers:start_node(Config, A),
+ ACh2 = rabbit_ct_client_helpers:open_channel(Config, A),
+ #'queue.declare_ok'{message_count = 10} =
+ amqp_channel:call(
+ ACh2, #'queue.declare'{queue = <<"ha.nopromote.test">>,
+ durable = true}),
+ #'queue.declare_ok'{message_count = 10} =
+ amqp_channel:call(
+ ACh2, #'queue.declare'{queue = <<"ha.nopromoteonfailure.test">>,
+ durable = true}),
+ ok.
+
+nodes_policy_should_pick_master_from_its_params(Config) ->
+ [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A], [all])),
+ %% --> Master: A
+ %% Slaves: [B, C] or [C, B]
+ SSPids = ?awaitMatch(SSPids when is_list(SSPids),
+ proplists:get_value(synchronised_slave_pids,
+ find_queue(?QNAME, A)),
+ 10000),
+
+ %% Choose a mirror that isn't the first synchronised mirror. This covers a
+ %% bug where the first one was always chosen, even if it was not part of
+ %% the policy.
+ LastSlave = node(lists:last(SSPids)),
+ ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A],
+ [{nodes, [LastSlave]}])),
+ %% --> Master: B or C (depends on the order of current mirrors)
+ %% Slaves: []
+
+ %% Now choose a new master that isn't synchronised. The previous
+ %% policy made sure that the queue only runs on one node (the last
+ %% from the initial synchronised list). Thus, by taking the first
+ %% node from this list, we know it is not synchronised.
+ %%
+ %% Because the policy doesn't cover any synchronised mirror, RabbitMQ
+ %% should instead use an existing synchronised mirror as the new master,
+ %% even though that isn't in the policy.
+ ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A],
+ [{nodes, [LastSlave, A]}])),
+ %% --> Master: B or C (same as previous policy)
+ %% Slaves: [A]
+
+ NewMaster = node(erlang:hd(SSPids)),
+ ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A],
+ [{nodes, [NewMaster]}])),
+ %% --> Master: B or C (the other one compared to previous policy)
+ %% Slaves: []
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}),
+ _ = rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY).
+
+follower_recovers_after_vhost_failure(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = <<"follower_recovers_after_vhost_failure-q">>,
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName}),
+ timer:sleep(500),
+ assert_followers(A, QName, {A, [B]}, [{A, []}]),
+
+ %% Crash vhost on a node hosting a mirror
+ {ok, Sup} = rabbit_ct_broker_helpers:rpc(Config, B, rabbit_vhost_sup_sup, get_vhost_sup, [<<"/">>]),
+ exit(Sup, foo),
+
+ assert_followers(A, QName, {A, [B]}, [{A, []}]).
+
+follower_recovers_after_vhost_down_and_up(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = <<"follower_recovers_after_vhost_down_and_up-q">>,
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName}),
+ timer:sleep(200),
+ assert_followers(A, QName, {A, [B]}, [{A, []}]),
+
+ %% Crash vhost on a node hosting a mirror
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, B, <<"/">>),
+ %% rabbit_ct_broker_helpers:force_vhost_failure/2 will retry up to 10 times to
+ %% make sure that the top vhost supervision tree process did go down. MK.
+ timer:sleep(500),
+ %% Vhost is back up
+ case rabbit_ct_broker_helpers:rpc(Config, B, rabbit_vhost_sup_sup, start_vhost, [<<"/">>]) of
+ {ok, _Sup} -> ok;
+ {error,{already_started, _Sup}} -> ok
+ end,
+
+ assert_followers(A, QName, {A, [B]}, [{A, []}]).
+
+master_migrates_on_vhost_down(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = <<"master_migrates_on_vhost_down-q">>,
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName}),
+ timer:sleep(500),
+ assert_followers(A, QName, {A, [B]}, [{A, []}]),
+
+ %% Crash vhost on the node hosting queue master
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, A, <<"/">>),
+ timer:sleep(500),
+ assert_followers(A, QName, {B, []}).
+
+follower_recovers_after_vhost_down_and_master_migrated(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = <<"follower_recovers_after_vhost_down_and_master_migrated-q">>,
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName}),
+ timer:sleep(500),
+ assert_followers(A, QName, {A, [B]}, [{A, []}]),
+ %% Crash vhost on the node hosting queue master
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, A, <<"/">>),
+ timer:sleep(500),
+ assert_followers(B, QName, {B, []}),
+
+ %% Restart the vhost on the node (previously) hosting queue master
+ case rabbit_ct_broker_helpers:rpc(Config, A, rabbit_vhost_sup_sup, start_vhost, [<<"/">>]) of
+ {ok, _Sup} -> ok;
+ {error,{already_started, _Sup}} -> ok
+ end,
+ timer:sleep(500),
+ assert_followers(B, QName, {B, [A]}, [{B, []}]).
+
+random_policy(Config) ->
+ run_proper(fun prop_random_policy/1, [Config]).
+
+failing_random_policies(Config) ->
+ [A, B | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ %% These sets of policies were found to fail by PropEr in the
+ %% `random_policy` test above. We add them explicitly here to make
+ %% sure they get tested.
+ ?assertEqual(true, test_random_policy(Config, Nodes,
+ [{nodes, [A, B]}, {nodes, [A]}])),
+ ?assertEqual(true, test_random_policy(Config, Nodes,
+ [{exactly, 3}, undefined, all, {nodes, [B]}])),
+ ?assertEqual(true, test_random_policy(Config, Nodes,
+ [all, undefined, {exactly, 2}, all, {exactly, 3}, {exactly, 3},
+ undefined, {exactly, 3}, all])).
+
+promote_follower_after_standalone_restart(Config) ->
+ %% Tests that mirrors can be brought up standalone after forgetting the rest
+ %% of the cluster. Slave ordering should be irrelevant.
+ %% https://github.com/rabbitmq/rabbitmq-server/issues/1213
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, <<"all">>),
+ amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, 15),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ rabbit_ct_helpers:await_condition(fun() ->
+ 15 =:= proplists:get_value(messages, find_queue(?QNAME, A))
+ end, 60000),
+
+ rabbit_ct_broker_helpers:stop_node(Config, C),
+ rabbit_ct_broker_helpers:stop_node(Config, B),
+ rabbit_ct_broker_helpers:stop_node(Config, A),
+
+ %% Restart one mirror
+ forget_cluster_node(Config, B, C),
+ forget_cluster_node(Config, B, A),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, B),
+ rabbit_ct_helpers:await_condition(fun() ->
+ 15 =:= proplists:get_value(messages, find_queue(?QNAME, B))
+ end, 60000),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, B),
+
+ %% Restart the other
+ forget_cluster_node(Config, C, B),
+ forget_cluster_node(Config, C, A),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, C),
+ 15 = proplists:get_value(messages, find_queue(?QNAME, C)),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, C),
+
+ ok.
+
+rebalance_all(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ Q3 = <<"q3">>,
+ Q4 = <<"q4">>,
+ Q5 = <<"q5">>,
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q1}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q2}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q3}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q4}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q5}),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"q.*">>, <<"all">>),
+ timer:sleep(1000),
+
+ rabbit_ct_client_helpers:publish(ACh, Q1, 5),
+ rabbit_ct_client_helpers:publish(ACh, Q2, 3),
+ assert_followers(A, Q1, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+ assert_followers(A, Q2, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+ assert_followers(A, Q3, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+ assert_followers(A, Q4, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+ assert_followers(A, Q5, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+
+ {ok, Summary} = rpc:call(A, rabbit_amqqueue, rebalance, [classic, ".*", ".*"]),
+
+ %% Check that we have at most 2 queues per node
+ Condition1 = fun() ->
+ lists:all(fun(NodeData) ->
+ lists:all(fun({_, V}) when is_integer(V) -> V =< 2;
+ (_) -> true end,
+ NodeData)
+ end, Summary)
+ end,
+ rabbit_ct_helpers:await_condition(Condition1, 60000),
+
+ %% Check that Q1 and Q2 haven't moved
+ assert_followers(A, Q1, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+ assert_followers(A, Q2, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+
+ ok.
+
+rebalance_exactly(Config) ->
+ [A, _, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ Q3 = <<"q3">>,
+ Q4 = <<"q4">>,
+ Q5 = <<"q5">>,
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q1}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q2}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q3}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q4}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q5}),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"q.*">>, {<<"exactly">>, 2}),
+ timer:sleep(1000),
+
+ %% Rebalancing happens with existing mirrors. Thus, before we
+ %% can verify it works as expected, we need the queues to be on
+ %% different mirrors.
+ %%
+ %% We only test Q3, Q4 and Q5 because the first two are expected to
+ %% stay where they are.
+ ensure_queues_are_mirrored_on_different_mirrors([Q3, Q4, Q5], A, ACh),
+
+ rabbit_ct_client_helpers:publish(ACh, Q1, 5),
+ rabbit_ct_client_helpers:publish(ACh, Q2, 3),
+
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q3, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q4, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q5, A)))),
+
+ {ok, Summary} = rpc:call(A, rabbit_amqqueue, rebalance, [classic, ".*", ".*"]),
+
+ %% Check that we have at most 2 queues per node
+ Condition1 = fun() ->
+ lists:all(fun(NodeData) ->
+ lists:all(fun({_, V}) when is_integer(V) -> V =< 2;
+ (_) -> true end,
+ NodeData)
+ end, Summary)
+ end,
+ rabbit_ct_helpers:await_condition(Condition1, 60000),
+
+ %% Check that Q1 and Q2 haven't moved
+ Condition2 = fun () ->
+ A =:= node(proplists:get_value(pid, find_queue(Q1, A))) andalso
+ A =:= node(proplists:get_value(pid, find_queue(Q2, A)))
+ end,
+ rabbit_ct_helpers:await_condition(Condition2, 40000),
+
+ ok.
+
+ensure_queues_are_mirrored_on_different_mirrors(Queues, Master, Ch) ->
+ SNodes = [node(SPid)
+ || Q <- Queues,
+ SPid <- proplists:get_value(slave_pids, find_queue(Q, Master))],
+ UniqueSNodes = lists:usort(SNodes),
+ case UniqueSNodes of
+ [_] ->
+ %% All passed queues are on the same mirror. Let's redeclare
+ %% one of them and test again.
+ Q = hd(Queues),
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}),
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q}),
+ ensure_queues_are_mirrored_on_different_mirrors(Queues, Master, Ch);
+ _ ->
+ ok
+ end.
+
+rebalance_nodes(Config) ->
+ [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ Q3 = <<"q3">>,
+ Q4 = <<"q4">>,
+ Q5 = <<"q5">>,
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q1}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q2}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q3}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q4}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q5}),
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, A, <<"q.*">>,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(A), rabbit_misc:atom_to_binary(B)]}),
+ timer:sleep(1000),
+
+ rabbit_ct_client_helpers:publish(ACh, Q1, 5),
+ rabbit_ct_client_helpers:publish(ACh, Q2, 3),
+
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q3, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q4, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q5, A)))),
+
+ {ok, Summary} = rpc:call(A, rabbit_amqqueue, rebalance, [classic, ".*", ".*"]),
+
+ %% Check that we have at most 3 queues per node
+ ?assert(lists:all(fun(NodeData) ->
+ lists:all(fun({_, V}) when is_integer(V) -> V =< 3;
+ (_) -> true end,
+ NodeData)
+ end, Summary)),
+ %% Check that Q1 and Q2 haven't moved
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))),
+
+ ok.
+
+rebalance_multiple_blocked(Config) ->
+ [A, _, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ Q3 = <<"q3">>,
+ Q4 = <<"q4">>,
+ Q5 = <<"q5">>,
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q1}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q2}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q3}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q4}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q5}),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q3, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q4, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q5, A)))),
+ ?assert(rabbit_ct_broker_helpers:rpc(
+ Config, A,
+ ?MODULE, rebalance_multiple_blocked1, [Config])).
+
+rebalance_multiple_blocked1(_) ->
+ Parent = self(),
+ Fun = fun() ->
+ Parent ! rabbit_amqqueue:rebalance(classic, ".*", ".*")
+ end,
+ spawn(Fun),
+ spawn(Fun),
+ Rets = [receive Ret1 -> Ret1 end,
+ receive Ret2 -> Ret2 end],
+ lists:member({error, rebalance_in_progress}, Rets).
+
+%%----------------------------------------------------------------------------
+
+assert_followers(RPCNode, QName, Exp) ->
+ assert_followers(RPCNode, QName, Exp, []).
+
+assert_followers(RPCNode, QName, Exp, PermittedIntermediate) ->
+ assert_followers0(RPCNode, QName, Exp,
+ [{get(previous_exp_m_node), get(previous_exp_s_nodes)} |
+ PermittedIntermediate], 1000).
+
+assert_followers0(_RPCNode, _QName, [], _PermittedIntermediate, _Attempts) ->
+ error(invalid_expectation);
+assert_followers0(RPCNode, QName, [{ExpMNode, ExpSNodes}|T], PermittedIntermediate, Attempts) ->
+ case assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts, nofail) of
+ ok ->
+ ok;
+ failed ->
+ assert_followers0(RPCNode, QName, T, PermittedIntermediate, Attempts - 1)
+ end;
+assert_followers0(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts) ->
+ assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts, fail).
+
+assert_followers1(_RPCNode, _QName, _Exp, _PermittedIntermediate, 0, fail) ->
+ error(give_up_waiting_for_followers);
+assert_followers1(_RPCNode, _QName, _Exp, _PermittedIntermediate, 0, nofail) ->
+ failed;
+assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts, FastFail) ->
+ Q = find_queue(QName, RPCNode),
+ Pid = proplists:get_value(pid, Q),
+ SPids = proplists:get_value(slave_pids, Q),
+ ActMNode = node(Pid),
+ ActSNodes = case SPids of
+ '' -> '';
+ _ -> [node(SPid) || SPid <- SPids]
+ end,
+ case ExpMNode =:= ActMNode andalso equal_list(ExpSNodes, ActSNodes) of
+ false ->
+ %% It's an async change, so if nothing has changed yet let's
+ %% just wait. Of course this means that if something does not
+ %% change when expected, the test eventually times out, which
+ %% is a bit tedious.
+ case [{PermMNode, PermSNodes} || {PermMNode, PermSNodes} <- PermittedIntermediate,
+ PermMNode =:= ActMNode,
+ equal_list(PermSNodes, ActSNodes)] of
+ [] ->
+ case FastFail of
+ fail ->
+ ct:fail("Expected ~p / ~p, got ~p / ~p~nat ~p~n",
+ [ExpMNode, ExpSNodes, ActMNode, ActSNodes,
+ get_stacktrace()]);
+ nofail ->
+ failed
+ end;
+ State ->
+ ct:pal("Waiting to leave state ~p~n Waiting for ~p~n",
+ [State, {ExpMNode, ExpSNodes}]),
+ timer:sleep(200),
+ assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes},
+ PermittedIntermediate,
+ Attempts - 1, FastFail)
+ end;
+ true ->
+ put(previous_exp_m_node, ExpMNode),
+ put(previous_exp_s_nodes, ExpSNodes),
+ ok
+ end.
+
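+%% Order-insensitive comparison of expected vs. actual mirror node lists;
+%% '' denotes a non-mirrored queue.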
+equal_list('', '') -> true;
+equal_list('', _Act) -> false;
+equal_list(_Exp, '') -> false;
+equal_list([], []) -> true;
+equal_list(_Exp, []) -> false;
+equal_list([], _Act) -> false;
+equal_list([H|T], Act) -> case lists:member(H, Act) of
+ true -> equal_list(T, Act -- [H]);
+ false -> false
+ end.
+
+find_queue(QName, RPCNode) ->
+ find_queue(QName, RPCNode, 1000).
+
+find_queue(QName, RPCNode, 0) -> error({did_not_find_queue, QName, RPCNode});
+find_queue(QName, RPCNode, Attempts) ->
+ Qs = rpc:call(RPCNode, rabbit_amqqueue, info_all, [?VHOST], infinity),
+ case find_queue0(QName, Qs) of
+ did_not_find_queue -> timer:sleep(100),
+ find_queue(QName, RPCNode, Attempts - 1);
+ Q -> Q
+ end.
+
+find_queue0(QName, Qs) ->
+ case [Q || Q <- Qs, proplists:get_value(name, Q) =:=
+ rabbit_misc:r(?VHOST, queue, QName)] of
+ [R] -> R;
+ [] -> did_not_find_queue
+ end.
+
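+%% Capture the current stack trace by throwing and immediately catching a
+%% dummy exception.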
+get_stacktrace() ->
+ try
+ throw(e)
+ catch
+ _:e:Stacktrace ->
+ Stacktrace
+ end.
+
+%%----------------------------------------------------------------------------
+run_proper(Fun, Args) ->
+ ?assertEqual(true,
+ proper:counterexample(erlang:apply(Fun, Args),
+ [{numtests, 25},
+ {on_output, fun(F, A) -> ct:pal(?LOW_IMPORTANCE, F, A) end}])).
+
+prop_random_policy(Config) ->
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ ?FORALL(
+ Policies, non_empty(list(policy_gen(Nodes))),
+ test_random_policy(Config, Nodes, Policies)).
+
+apply_policy_to_declared_queue(Config, Ch, Nodes, Policies) ->
+ [NodeA | _] = Nodes,
+ amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}),
+ %% Add some load so mirrors can be busy synchronising
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, 100000),
+ %% Apply policies in parallel on all nodes
+ apply_in_parallel(Config, Nodes, Policies),
+ %% Give it some time to generate all internal notifications
+ timer:sleep(2000),
+ %% Check the result
+ wait_for_last_policy(?QNAME, NodeA, Policies, 30).
+
+test_random_policy(Config, Nodes, Policies) ->
+ [NodeA | _] = Nodes,
+ Ch = rabbit_ct_client_helpers:open_channel(Config, NodeA),
+ Result = apply_policy_to_declared_queue(Config, Ch, Nodes, Policies),
+ %% Cleanup
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}),
+ _ = rabbit_ct_broker_helpers:clear_policy(Config, NodeA, ?POLICY),
+ Result.
+
+apply_in_parallel(Config, Nodes, Policies) ->
+ Self = self(),
+ [spawn_link(fun() ->
+ [begin
+ apply_policy(Config, N, Policy)
+ end || Policy <- Policies],
+ Self ! parallel_task_done
+ end) || N <- Nodes],
+ [receive
+ parallel_task_done ->
+ ok
+ end || _ <- Nodes].
+
+%% Proper generators
+policy_gen(Nodes) ->
+ %% Stop mirroring needs to be called often to trigger rabbitmq-server#803
+ frequency([{3, undefined},
+ {1, all},
+ {1, {nodes, nodes_gen(Nodes)}},
+ {1, {exactly, choose(1, 3)}}
+ ]).
+
+nodes_gen(Nodes) ->
+ ?LET(List, non_empty(list(oneof(Nodes))),
+ sets:to_list(sets:from_list(List))).
+
+%% Checks
+wait_for_last_policy(QueueName, NodeA, TestedPolicies, Tries) ->
+ %% Ensure the owner/master is able to process a call request,
+ %% which means that all pending casts have been processed.
+ %% Use the information returned by the owner/master to verify
+ %% the test result.
+ Info = find_queue(QueueName, NodeA),
+ Pid = proplists:get_value(pid, Info),
+ Node = node(Pid),
+ %% Gets owner/master
+ case rpc:call(Node, gen_server, call, [Pid, info], 5000) of
+ {badrpc, _} ->
+ %% The queue is probably being migrated to another node.
+ %% Let's wait a bit longer.
+ timer:sleep(1000),
+ wait_for_last_policy(QueueName, NodeA, TestedPolicies, Tries - 1);
+ Result ->
+ FinalInfo = case Result of
+ {ok, I} -> I;
+ _ when is_list(Result) ->
+ Result
+ end,
+ %% The last policy is the final state
+ LastPolicy = lists:last(TestedPolicies),
+ case verify_policy(LastPolicy, FinalInfo) of
+ true ->
+ true;
+ false when Tries =:= 1 ->
+ Policies = rpc:call(Node, rabbit_policy, list, [], 5000),
+ ct:pal(
+ "Last policy not applied:~n"
+ " Queue node: ~s (~p)~n"
+ " Queue info: ~p~n"
+ " Configured policies: ~p~n"
+ " Tested policies: ~p",
+ [Node, Pid, FinalInfo, Policies, TestedPolicies]),
+ false;
+ false ->
+ timer:sleep(1000),
+ wait_for_last_policy(QueueName, NodeA, TestedPolicies,
+ Tries - 1)
+ end
+ end.
+
+verify_policy(undefined, Info) ->
+ %% If the queue is not mirrored, it returns ''
+ '' == proplists:get_value(slave_pids, Info);
+verify_policy(all, Info) ->
+ 2 == length(proplists:get_value(slave_pids, Info));
+verify_policy({exactly, 1}, Info) ->
+ %% If the queue is mirrored, it returns a list
+ [] == proplists:get_value(slave_pids, Info);
+verify_policy({exactly, N}, Info) ->
+ (N - 1) == length(proplists:get_value(slave_pids, Info));
+verify_policy({nodes, Nodes}, Info) ->
+ Master = node(proplists:get_value(pid, Info)),
+ Slaves = [node(P) || P <- proplists:get_value(slave_pids, Info)],
+ lists:sort(Nodes) == lists:sort([Master | Slaves]).
+
+%% Policies
+apply_policy(Config, N, undefined) ->
+ _ = rabbit_ct_broker_helpers:clear_policy(Config, N, ?POLICY);
+apply_policy(Config, N, all) ->
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, N, ?POLICY, <<"all">>,
+ [{<<"ha-sync-mode">>, <<"automatic">>}]);
+apply_policy(Config, N, {nodes, Nodes}) ->
+ NNodes = [rabbit_misc:atom_to_binary(Node) || Node <- Nodes],
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, N, ?POLICY, {<<"nodes">>, NNodes},
+ [{<<"ha-sync-mode">>, <<"automatic">>}]);
+apply_policy(Config, N, {exactly, Exactly}) ->
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, N, ?POLICY, {<<"exactly">>, Exactly},
+ [{<<"ha-sync-mode">>, <<"automatic">>}]).
+
+forget_cluster_node(Config, Node, NodeToRemove) ->
+ rabbit_ct_broker_helpers:rabbitmqctl(
+ Config, Node, ["forget_cluster_node", "--offline", NodeToRemove]).
diff --git a/deps/rabbit/test/dynamic_qq_SUITE.erl b/deps/rabbit/test/dynamic_qq_SUITE.erl
new file mode 100644
index 0000000000..9a8f2110d6
--- /dev/null
+++ b/deps/rabbit/test/dynamic_qq_SUITE.erl
@@ -0,0 +1,248 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(dynamic_qq_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(quorum_queue_utils, [wait_for_messages_ready/3,
+ ra_name/1]).
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, clustered}
+ ].
+
+groups() ->
+ [
+ {clustered, [], [
+ {cluster_size_3, [], [
+ recover_follower_after_standalone_restart,
+ vhost_deletion,
+ force_delete_if_no_consensus,
+ takeover_on_failure,
+ takeover_on_shutdown,
+ quorum_unaffected_after_vhost_failure
+ ]}
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(clustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}},
+ {queue_name, Q},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config2, quorum_queue) of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_testcase(Testcase, Config2),
+ Skip
+ end.
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+%% Vhost deletion needs to successfully tear down queues.
+vhost_deletion(Config) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+ QName = ?config(queue_name, Config),
+ Args = ?config(queue_args, Config),
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true
+ }),
+ ok = rpc:call(Node, rabbit_vhost, delete, [<<"/">>, <<"acting-user">>]),
+ ?assertMatch([],
+ rabbit_ct_broker_helpers:rabbitmqctl_list(
+ Config, 0, ["list_queues", "name"])),
+ ok.
+
+force_delete_if_no_consensus(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = ?config(queue_name, Config),
+ Args = ?config(queue_args, Config),
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true
+ }),
+ rabbit_ct_client_helpers:publish(ACh, QName, 10),
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, A),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, C),
+
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ ?assertMatch(
+ #'queue.declare_ok'{},
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true,
+ passive = true})),
+ BCh2 = rabbit_ct_client_helpers:open_channel(Config, B),
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(BCh2, #'queue.delete'{queue = QName})),
+ ok.
+
+takeover_on_failure(Config) ->
+ takeover_on(Config, kill_node).
+
+takeover_on_shutdown(Config) ->
+ takeover_on(Config, stop_node).
+
+takeover_on(Config, Fun) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = ?config(queue_name, Config),
+ Args = ?config(queue_args, Config),
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true
+ }),
+ rabbit_ct_client_helpers:publish(ACh, QName, 10),
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+
+ ok = rabbit_ct_broker_helpers:Fun(Config, C),
+ ok = rabbit_ct_broker_helpers:Fun(Config, A),
+
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true}),
+ ok = rabbit_ct_broker_helpers:start_node(Config, A),
+ ACh2 = rabbit_ct_client_helpers:open_channel(Config, A),
+ #'queue.declare_ok'{message_count = 10} =
+ amqp_channel:call(
+ ACh2, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true}),
+ ok.
+
+quorum_unaffected_after_vhost_failure(Config) ->
+ [A, B, _] = Servers0 = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Servers = lists:sort(Servers0),
+
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = ?config(queue_name, Config),
+ Args = ?config(queue_args, Config),
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true
+ }),
+ timer:sleep(300),
+
+ Info0 = rpc:call(A, rabbit_quorum_queue, infos,
+ [rabbit_misc:r(<<"/">>, queue, QName)]),
+ ?assertEqual(Servers, lists:sort(proplists:get_value(online, Info0, []))),
+
+ %% Crash vhost on both nodes
+ {ok, SupA} = rabbit_ct_broker_helpers:rpc(Config, A, rabbit_vhost_sup_sup, get_vhost_sup, [<<"/">>]),
+ exit(SupA, foo),
+ {ok, SupB} = rabbit_ct_broker_helpers:rpc(Config, B, rabbit_vhost_sup_sup, get_vhost_sup, [<<"/">>]),
+ exit(SupB, foo),
+
+ Info = rpc:call(A, rabbit_quorum_queue, infos,
+ [rabbit_misc:r(<<"/">>, queue, QName)]),
+ ?assertEqual(Servers, lists:sort(proplists:get_value(online, Info, []))).
+
+recover_follower_after_standalone_restart(Config) ->
+ case os:getenv("SECONDARY_UMBRELLA") of
+ false ->
+ %% Tests that followers can be brought up standalone after forgetting the
+ %% rest of the cluster. Consensus won't be reached as there is only one node in the
+ %% new cluster.
+ Servers = [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ QName = ?config(queue_name, Config),
+ Args = ?config(queue_args, Config),
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true
+ }),
+
+ rabbit_ct_client_helpers:publish(Ch, QName, 15),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ Name = ra_name(QName),
+ wait_for_messages_ready(Servers, Name, 15),
+
+ rabbit_ct_broker_helpers:stop_node(Config, C),
+ rabbit_ct_broker_helpers:stop_node(Config, B),
+ rabbit_ct_broker_helpers:stop_node(Config, A),
+
+ %% Restart one follower
+ forget_cluster_node(Config, B, C),
+ forget_cluster_node(Config, B, A),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, B),
+ wait_for_messages_ready([B], Name, 15),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, B),
+
+ %% Restart the other
+ forget_cluster_node(Config, C, B),
+ forget_cluster_node(Config, C, A),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, C),
+ wait_for_messages_ready([C], Name, 15),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, C),
+ ok;
+ _ ->
+ {skip, "cannot be run in mixed mode"}
+ end.
+
+%%----------------------------------------------------------------------------
+forget_cluster_node(Config, Node, NodeToRemove) ->
+ rabbit_ct_broker_helpers:rabbitmqctl(
+ Config, Node, ["forget_cluster_node", "--offline", NodeToRemove]).
diff --git a/deps/rabbit/test/eager_sync_SUITE.erl b/deps/rabbit/test/eager_sync_SUITE.erl
new file mode 100644
index 0000000000..a9e2ea2107
--- /dev/null
+++ b/deps/rabbit/test/eager_sync_SUITE.erl
@@ -0,0 +1,271 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(eager_sync_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"ha.two.test">>).
+-define(QNAME_AUTO, <<"ha.auto.test">>).
+-define(MESSAGE_COUNT, 200000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ eager_sync,
+ eager_sync_cancel,
+ eager_sync_auto,
+ eager_sync_auto_on_policy_change,
+ eager_sync_requeue
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 3,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1,
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+eager_sync(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ %% Don't sync, lose messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ restart(Config, B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0),
+
+ %% Sync, keep messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ ok = sync(C, ?QNAME),
+ restart(Config, B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ %% Check the no-need-to-sync path
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ ok = sync(C, ?QNAME),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ %% keep unacknowledged messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 2),
+ restart(Config, A),
+ rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 3),
+ sync(C, ?QNAME),
+ restart(Config, B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ ok.
+
+eager_sync_cancel(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+
+ set_app_sync_batch_size(A),
+ set_app_sync_batch_size(B),
+ set_app_sync_batch_size(C),
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+ {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
+ eager_sync_cancel_test2(Config, A, B, C, Ch, 100).
+
+eager_sync_cancel_test2(_, _, _, _, _, 0) ->
+ error(no_more_attempts_left);
+eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts) ->
+ %% Sync then cancel
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ set_app_sync_batch_size(A),
+ spawn_link(fun() -> ok = sync_nowait(C, ?QNAME) end),
+ case wait_for_syncing(C, ?QNAME, 1) of
+ ok ->
+ case sync_cancel(C, ?QNAME) of
+ ok ->
+ wait_for_running(C, ?QNAME),
+ restart(Config, B),
+ set_app_sync_batch_size(B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0),
+
+ {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
+ ok;
+ {ok, not_syncing} ->
+ %% Damn. Syncing finished between wait_for_syncing/3 and
+ %% sync_cancel/2 above. Start again.
+ amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
+ eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts - 1)
+ end;
+ synced_already ->
+ %% Damn. Syncing finished before wait_for_syncing/3. Start again.
+ amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
+ eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts - 1)
+ end.
+
+eager_sync_auto(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME_AUTO,
+ durable = true}),
+
+ %% Sync automatically, don't lose messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
+ restart(Config, A),
+ wait_for_sync(C, ?QNAME_AUTO),
+ restart(Config, B),
+ wait_for_sync(C, ?QNAME_AUTO),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
+
+ ok.
+
+eager_sync_auto_on_policy_change(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ %% Sync automatically once the policy is changed to tell us to.
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ Params = [rabbit_misc:atom_to_binary(N) || N <- [A, B]],
+ rabbit_ct_broker_helpers:set_ha_policy(Config,
+ A, <<"^ha.two.">>, {<<"nodes">>, Params},
+ [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ wait_for_sync(C, ?QNAME),
+
+ ok.
+
+eager_sync_requeue(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, 2),
+ {#'basic.get_ok'{delivery_tag = TagA}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
+ {#'basic.get_ok'{delivery_tag = TagB}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagA, requeue = true}),
+ restart(Config, B),
+ ok = sync(C, ?QNAME),
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagB, requeue = true}),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, 2),
+
+ ok.
+
+restart(Config, Node) ->
+ rabbit_ct_broker_helpers:restart_broker(Config, Node).
+
+sync(Node, QName) ->
+ case sync_nowait(Node, QName) of
+ ok -> wait_for_sync(Node, QName),
+ ok;
+ R -> R
+ end.
+
+sync_nowait(Node, QName) -> action(Node, sync_queue, QName).
+sync_cancel(Node, QName) -> action(Node, cancel_sync_queue, QName).
+
+wait_for_sync(Node, QName) ->
+ sync_detection_SUITE:wait_for_sync_status(true, Node, QName).
+
+action(Node, Action, QName) ->
+ rabbit_control_helper:command_with_output(
+ Action, Node, [binary_to_list(QName)], [{"-p", "/"}]).
+
+queue(Node, QName) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, QName),
+ {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]),
+ Q.
+
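+%% Poll every 100ms until the queue reports a `syncing` state. If it is
+%% already back to `running` with the expected number of synchronised
+%% mirrors, the sync presumably completed before we could observe it, so
+%% return `synced_already` and let the caller retry.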
+wait_for_syncing(Node, QName, Target) ->
+ case state(Node, QName) of
+ {{syncing, _}, _} -> ok;
+ {running, Target} -> synced_already;
+ _ -> timer:sleep(100),
+ wait_for_syncing(Node, QName, Target)
+ end.
+
+wait_for_running(Node, QName) ->
+ case state(Node, QName) of
+ {running, _} -> ok;
+ _ -> timer:sleep(100),
+ wait_for_running(Node, QName)
+ end.
+
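+%% Return the queue state together with the number of synchronised
+%% mirrors, as reported by rabbit_amqqueue:info/2 on the given node.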
+state(Node, QName) ->
+ [{state, State}, {synchronised_slave_pids, Pids}] =
+ rpc:call(Node, rabbit_amqqueue, info,
+ [queue(Node, QName), [state, synchronised_slave_pids]]),
+ {State, length(Pids)}.
+
+%% eager_sync_cancel_test needs a batch size that's < ?MESSAGE_COUNT
+%% in order to pass, because a SyncBatchSize >= ?MESSAGE_COUNT will
+%% always finish before the test is able to cancel the sync.
+set_app_sync_batch_size(Node) ->
+ rabbit_control_helper:command(
+ eval, Node,
+ ["application:set_env(rabbit, mirroring_sync_batch_size, 1)."]).
diff --git a/deps/rabbit/test/failing_dummy_interceptor.erl b/deps/rabbit/test/failing_dummy_interceptor.erl
new file mode 100644
index 0000000000..62669e7f1f
--- /dev/null
+++ b/deps/rabbit/test/failing_dummy_interceptor.erl
@@ -0,0 +1,27 @@
+-module(failing_dummy_interceptor).
+
+-behaviour(rabbit_channel_interceptor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+
+-compile(export_all).
+
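+%% Note: init/1 deliberately blocks for 15.5 seconds; presumably this exceeds
+%% the channel's interceptor initialisation timeout so that suites using this
+%% module can exercise the "interceptor fails to start" path.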
+init(_Ch) ->
+ timer:sleep(15500),
+ undefined.
+
+description() ->
+ [{description,
+ <<"Empties payload on publish">>}].
+
+intercept(#'basic.publish'{} = Method, Content, _IState) ->
+ Content2 = Content#content{payload_fragments_rev = []},
+ {Method, Content2};
+
+intercept(Method, Content, _VHost) ->
+ {Method, Content}.
+
+applies_to() ->
+ ['basic.publish'].
diff --git a/deps/rabbit/test/feature_flags_SUITE.erl b/deps/rabbit/test/feature_flags_SUITE.erl
new file mode 100644
index 0000000000..29dfcf068b
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE.erl
@@ -0,0 +1,1156 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(feature_flags_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([suite/0,
+ all/0,
+ groups/0,
+ init_per_suite/1,
+ end_per_suite/1,
+ init_per_group/2,
+ end_per_group/2,
+ init_per_testcase/2,
+ end_per_testcase/2,
+
+ registry_general_usage/1,
+ registry_concurrent_reloads/1,
+ enable_feature_flag_in_a_healthy_situation/1,
+ enable_unsupported_feature_flag_in_a_healthy_situation/1,
+ enable_feature_flag_when_ff_file_is_unwritable/1,
+ enable_feature_flag_with_a_network_partition/1,
+ mark_feature_flag_as_enabled_with_a_network_partition/1,
+
+ clustering_ok_with_ff_disabled_everywhere/1,
+ clustering_ok_with_ff_enabled_on_some_nodes/1,
+ clustering_ok_with_ff_enabled_everywhere/1,
+ clustering_ok_with_new_ff_disabled/1,
+ clustering_denied_with_new_ff_enabled/1,
+ clustering_ok_with_new_ff_disabled_from_plugin_on_some_nodes/1,
+ clustering_ok_with_new_ff_enabled_from_plugin_on_some_nodes/1,
+ activating_plugin_with_new_ff_disabled/1,
+ activating_plugin_with_new_ff_enabled/1
+ ]).
+
+suite() ->
+ [{timetrap, {minutes, 15}}].
+
+all() ->
+ [
+ {group, registry},
+ {group, enabling_on_single_node},
+ {group, enabling_in_cluster},
+ {group, clustering},
+ {group, activating_plugin}
+ ].
+
+groups() ->
+ [
+ {registry, [],
+ [
+ registry_general_usage,
+ registry_concurrent_reloads
+ ]},
+ {enabling_on_single_node, [],
+ [
+ enable_feature_flag_in_a_healthy_situation,
+ enable_unsupported_feature_flag_in_a_healthy_situation,
+ enable_feature_flag_when_ff_file_is_unwritable
+ ]},
+ {enabling_in_cluster, [],
+ [
+ enable_feature_flag_in_a_healthy_situation,
+ enable_unsupported_feature_flag_in_a_healthy_situation,
+ enable_feature_flag_when_ff_file_is_unwritable,
+ enable_feature_flag_with_a_network_partition,
+ mark_feature_flag_as_enabled_with_a_network_partition
+ ]},
+ {clustering, [],
+ [
+ clustering_ok_with_ff_disabled_everywhere,
+ clustering_ok_with_ff_enabled_on_some_nodes,
+ clustering_ok_with_ff_enabled_everywhere,
+ clustering_ok_with_new_ff_disabled,
+ clustering_denied_with_new_ff_enabled,
+ clustering_ok_with_new_ff_disabled_from_plugin_on_some_nodes,
+ clustering_ok_with_new_ff_enabled_from_plugin_on_some_nodes
+ ]},
+ {activating_plugin, [],
+ [
+ activating_plugin_with_new_ff_disabled,
+ activating_plugin_with_new_ff_enabled
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config, [
+ fun rabbit_ct_broker_helpers:configure_dist_proxy/1
+ ]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(enabling_on_single_node, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodes_count, 1}]);
+init_per_group(enabling_in_cluster, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodes_count, 5}]);
+init_per_group(clustering, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodes_count, 2},
+ {rmq_nodes_clustered, false},
+ {start_rmq_with_plugins_disabled, true}]),
+ rabbit_ct_helpers:run_setup_steps(Config1, [
+ fun build_my_plugin/1,
+ fun work_around_cli_and_rabbit_circular_dep/1
+ ]);
+init_per_group(activating_plugin, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodes_count, 2},
+ {rmq_nodes_clustered, true},
+ {start_rmq_with_plugins_disabled, true}]),
+ rabbit_ct_helpers:run_setup_steps(Config1, [
+ fun build_my_plugin/1,
+ fun work_around_cli_and_rabbit_circular_dep/1
+ ]);
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ case ?config(tc_group_properties, Config) of
+ [{name, registry} | _] ->
+ application:set_env(lager, colored, true),
+ application:set_env(
+ lager,
+ handlers, [{lager_console_backend, [{level, debug}]}]),
+ application:set_env(
+ lager,
+ extra_sinks,
+ [{rabbit_log_lager_event,
+ [{handlers, [{lager_console_backend, [{level, debug}]}]}]
+ },
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers, [{lager_console_backend, [{level, debug}]}]}]
+ }]),
+ lager:start(),
+ FeatureFlagsFile = filename:join(?config(priv_dir, Config),
+ rabbit_misc:format(
+ "feature_flags-~s",
+ [Testcase])),
+ application:set_env(rabbit, feature_flags_file, FeatureFlagsFile),
+ rabbit_ct_helpers:set_config(
+ Config, {feature_flags_file, FeatureFlagsFile});
+ [{name, Name} | _]
+ when Name =:= enabling_on_single_node orelse
+ Name =:= clustering orelse
+ Name =:= activating_plugin ->
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes,
+ TestNumber * ClusterSize}}
+ ]),
+ Config2 = rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit,
+ [{forced_feature_flags_on_init, []},
+ {log, [{file, [{level, debug}]}]}]}),
+ Config3 = rabbit_ct_helpers:run_steps(
+ Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case Config3 of
+ {skip, _} ->
+ Config3;
+ _ ->
+ case is_feature_flag_subsystem_available(Config3) of
+ true ->
+ %% We can declare a new feature flag at
+ %% runtime. All of them are supported but
+ %% still disabled.
+ declare_arbitrary_feature_flag(Config3),
+ Config3;
+ false ->
+ end_per_testcase(Testcase, Config3),
+ {skip, "Feature flags subsystem unavailable"}
+ end
+ end;
+ [{name, enabling_in_cluster} | _] ->
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes,
+ TestNumber * ClusterSize}},
+ {net_ticktime, 5}
+ ]),
+ Config2 = rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit,
+ [{forced_feature_flags_on_init, []},
+ {log, [{file, [{level, debug}]}]}]}),
+ Config3 = rabbit_ct_helpers:run_steps(
+ Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case Config3 of
+ {skip, _} ->
+ Config3;
+ _ ->
+ case is_feature_flag_subsystem_available(Config3) of
+ true ->
+ %% We can declare a new feature flag at
+ %% runtime. All of them are supported but
+ %% still disabled.
+ declare_arbitrary_feature_flag(Config3),
+ Config3;
+ false ->
+ end_per_testcase(Testcase, Config3),
+ {skip, "Feature flags subsystem unavailable"}
+ end
+ end
+ end.
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = case ?config(tc_group_properties, Config) of
+ [{name, registry} | _] ->
+ Config;
+ _ ->
+ rabbit_ct_helpers:run_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps())
+ end,
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
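+%% Shorthand: the sorted list of feature flag names the registry reports
+%% for `Which` (one of `all`, `enabled`, `disabled` or `state_changing`).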
+-define(list_ff(Which),
+ lists:sort(maps:keys(rabbit_ff_registry:list(Which)))).
+
+registry_general_usage(_Config) ->
+ %% At first, the registry must be uninitialized.
+ ?assertNot(rabbit_ff_registry:is_registry_initialized()),
+
+ FeatureFlags = #{ff_a =>
+ #{desc => "Feature flag A",
+ stability => stable},
+ ff_b =>
+ #{desc => "Feature flag B",
+ stability => stable}},
+ rabbit_feature_flags:inject_test_feature_flags(
+ feature_flags_to_app_attrs(FeatureFlags)),
+
+ %% After initialization, it must know about the feature flags
+ %% declared in this testsuite. They must be disabled however.
+ rabbit_feature_flags:initialize_registry(),
+ ?assert(rabbit_ff_registry:is_registry_initialized()),
+ ?assertMatch([ff_a, ff_b], ?list_ff(all)),
+
+ ?assert(rabbit_ff_registry:is_supported(ff_a)),
+ ?assert(rabbit_ff_registry:is_supported(ff_b)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_d)),
+
+ ?assertEqual(erlang:map_size(rabbit_ff_registry:states()), 0),
+ ?assertMatch([], ?list_ff(enabled)),
+ ?assertMatch([], ?list_ff(state_changing)),
+ ?assertMatch([ff_a, ff_b], ?list_ff(disabled)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_a)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_b)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_d)),
+
+ %% We can declare a new feature flag at runtime. All of them are
+ %% supported but still disabled.
+ NewFeatureFlags = #{ff_c =>
+ #{desc => "Feature flag C",
+ provided_by => ?MODULE,
+ stability => stable}},
+ rabbit_feature_flags:initialize_registry(NewFeatureFlags),
+ ?assertMatch([ff_a, ff_b, ff_c],
+ lists:sort(maps:keys(rabbit_ff_registry:list(all)))),
+
+ ?assert(rabbit_ff_registry:is_supported(ff_a)),
+ ?assert(rabbit_ff_registry:is_supported(ff_b)),
+ ?assert(rabbit_ff_registry:is_supported(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_d)),
+
+ ?assertEqual(erlang:map_size(rabbit_ff_registry:states()), 0),
+ ?assertMatch([], ?list_ff(enabled)),
+ ?assertMatch([], ?list_ff(state_changing)),
+ ?assertMatch([ff_a, ff_b, ff_c], ?list_ff(disabled)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_a)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_b)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_d)),
+
+ %% After enabling `ff_a`, the registry reports it as enabled. The
+ %% others are supported but remain disabled.
+ rabbit_feature_flags:initialize_registry(#{},
+ #{ff_a => true},
+ true),
+ ?assertMatch([ff_a, ff_b, ff_c],
+ lists:sort(maps:keys(rabbit_ff_registry:list(all)))),
+
+ ?assert(rabbit_ff_registry:is_supported(ff_a)),
+ ?assert(rabbit_ff_registry:is_supported(ff_b)),
+ ?assert(rabbit_ff_registry:is_supported(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_d)),
+
+ ?assertMatch(#{ff_a := true}, rabbit_ff_registry:states()),
+ ?assertMatch([ff_a], ?list_ff(enabled)),
+ ?assertMatch([], ?list_ff(state_changing)),
+ ?assertMatch([ff_b, ff_c], ?list_ff(disabled)),
+ ?assert(rabbit_ff_registry:is_enabled(ff_a)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_b)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_d)),
+
+ %% This time, we disable `ff_a` and mark the state of `ff_c` as
+ %% `state_changing`. `ff_b` is expected to remain disabled.
+ rabbit_feature_flags:initialize_registry(#{},
+ #{ff_a => false,
+ ff_c => state_changing},
+ true),
+ ?assertMatch([ff_a, ff_b, ff_c],
+ lists:sort(maps:keys(rabbit_ff_registry:list(all)))),
+
+ ?assert(rabbit_ff_registry:is_supported(ff_a)),
+ ?assert(rabbit_ff_registry:is_supported(ff_b)),
+ ?assert(rabbit_ff_registry:is_supported(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_d)),
+
+ ?assertMatch(#{ff_c := state_changing}, rabbit_ff_registry:states()),
+ ?assertMatch([], ?list_ff(enabled)),
+ ?assertMatch([ff_c], ?list_ff(state_changing)),
+ ?assertMatch([ff_a, ff_b], ?list_ff(disabled)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_a)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_b)),
+ ?assertMatch(state_changing, rabbit_ff_registry:is_enabled(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_d)),
+
+ %% Finally, we disable `ff_c`. All of them are supported but
+ %% disabled.
+ rabbit_feature_flags:initialize_registry(#{},
+ #{ff_b => false,
+ ff_c => false},
+ true),
+ ?assertMatch([ff_a, ff_b, ff_c],
+ lists:sort(maps:keys(rabbit_ff_registry:list(all)))),
+
+ ?assert(rabbit_ff_registry:is_supported(ff_a)),
+ ?assert(rabbit_ff_registry:is_supported(ff_b)),
+ ?assert(rabbit_ff_registry:is_supported(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_d)),
+
+ ?assertEqual(erlang:map_size(rabbit_ff_registry:states()), 0),
+ ?assertMatch([], ?list_ff(enabled)),
+ ?assertMatch([], ?list_ff(state_changing)),
+ ?assertMatch([ff_a, ff_b, ff_c], ?list_ff(disabled)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_a)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_b)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_d)).
+
+registry_concurrent_reloads(_Config) ->
+ case rabbit_ff_registry:is_registry_initialized() of
+ true -> ok;
+ false -> rabbit_feature_flags:initialize_registry()
+ end,
+ ?assert(rabbit_ff_registry:is_registry_initialized()),
+
+ Parent = self(),
+
+ MakeName = fun(I) ->
+ list_to_atom(rabbit_misc:format("ff_~2..0b", [I]))
+ end,
+
+ ProcIs = lists:seq(1, 10),
+ Fun = fun(I) ->
+ %% Each process will declare its own feature flag to
+ %% make sure that each generated registry module is
+ %% different, and we don't lose previously declared
+ %% feature flags.
+ Name = MakeName(I),
+ Desc = rabbit_misc:format("Feature flag ~b", [I]),
+ NewFF = #{Name =>
+ #{desc => Desc,
+ stability => stable}},
+ rabbit_feature_flags:initialize_registry(NewFF),
+ unlink(Parent)
+ end,
+
+ %% Prepare feature flags which the spammer process should get at
+ %% some point.
+ FeatureFlags = #{ff_a =>
+ #{desc => "Feature flag A",
+ stability => stable},
+ ff_b =>
+ #{desc => "Feature flag B",
+ stability => stable}},
+ rabbit_feature_flags:inject_test_feature_flags(
+ feature_flags_to_app_attrs(FeatureFlags)),
+
+ %% Spawn a process which heavily uses the registry.
+ FinalFFList = lists:sort(
+ maps:keys(FeatureFlags) ++
+ [MakeName(I) || I <- ProcIs]),
+ Spammer = spawn_link(fun() -> registry_spammer([], FinalFFList) end),
+ rabbit_log_feature_flags:info(
+ ?MODULE_STRING ": Started registry spammer (~p)",
+ [self()]),
+
+ %% We acquire the lock from the main process to synchronize the test
+ %% processes we are about to spawn.
+ Lock = rabbit_feature_flags:registry_loading_lock(),
+ ThisNode = [node()],
+ rabbit_log_feature_flags:info(
+ ?MODULE_STRING ": Acquiring registry load lock"),
+ global:set_lock(Lock, ThisNode),
+
+ Pids = [begin
+ Pid = spawn_link(fun() -> Fun(I) end),
+ _ = erlang:monitor(process, Pid),
+ Pid
+ end
+ || I <- ProcIs],
+
+ %% We wait for one second to make sure all processes were started
+ %% and already sleep on the lock. Not really "make sure" because
+ %% we don't have a way to verify this fact, but it must be enough,
+ %% right?
+ timer:sleep(1000),
+ rabbit_log_feature_flags:info(
+ ?MODULE_STRING ": Releasing registry load lock"),
+ global:del_lock(Lock, ThisNode),
+
+ rabbit_log_feature_flags:info(
+ ?MODULE_STRING ": Wait for test processes to finish"),
+ lists:foreach(
+ fun(Pid) ->
+ receive {'DOWN', _, process, Pid, normal} -> ok end
+ end,
+ Pids),
+
+ %% We wait for one more second to make sure the spammer sees
+ %% all added feature flags.
+ timer:sleep(1000),
+
+ unlink(Spammer),
+ exit(Spammer, normal).
+
+registry_spammer(CurrentFeatureNames, FinalFeatureNames) ->
+ %% Infinite loop.
+ case ?list_ff(all) of
+ CurrentFeatureNames ->
+ registry_spammer(CurrentFeatureNames, FinalFeatureNames);
+ FinalFeatureNames ->
+ rabbit_log_feature_flags:info(
+ ?MODULE_STRING ": Registry spammer: all feature flags "
+ "appeared"),
+ registry_spammer1(FinalFeatureNames);
+ NewFeatureNames
+ when length(NewFeatureNames) > length(CurrentFeatureNames) ->
+ registry_spammer(NewFeatureNames, FinalFeatureNames)
+ end.
+
+registry_spammer1(FeatureNames) ->
+ ?assertEqual(FeatureNames, ?list_ff(all)),
+ registry_spammer1(FeatureNames).
+
+enable_feature_flag_in_a_healthy_situation(Config) ->
+ FeatureName = ff_from_testsuite,
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Node = ClusterSize - 1,
+ True = lists:duplicate(ClusterSize, true),
+ False = lists:duplicate(ClusterSize, false),
+
+ %% The feature flag is supported but disabled initially.
+ ?assertEqual(
+ True,
+ is_feature_flag_supported(Config, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Enabling the feature flag works.
+ ?assertEqual(
+ ok,
+ enable_feature_flag_on(Config, Node, FeatureName)),
+ ?assertEqual(
+ True,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Re-enabling the feature flag also works.
+ ?assertEqual(
+ ok,
+ enable_feature_flag_on(Config, Node, FeatureName)),
+ ?assertEqual(
+ True,
+ is_feature_flag_enabled(Config, FeatureName)).
+
+enable_unsupported_feature_flag_in_a_healthy_situation(Config) ->
+ FeatureName = unsupported_feature_flag,
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Node = ClusterSize - 1,
+ False = lists:duplicate(ClusterSize, false),
+
+ %% The feature flag is unsupported and thus disabled.
+ ?assertEqual(
+ False,
+ is_feature_flag_supported(Config, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Trying to enable the feature flag fails because it is unsupported.
+ ?assertEqual(
+ {error, unsupported},
+ enable_feature_flag_on(Config, Node, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)).
+
+enable_feature_flag_when_ff_file_is_unwritable(Config) ->
+ QQSupported = rabbit_ct_broker_helpers:is_feature_flag_supported(
+ Config, quorum_queue),
+ case QQSupported of
+ true -> do_enable_feature_flag_when_ff_file_is_unwritable(Config);
+ false -> {skip, "Quorum queues are unsupported"}
+ end.
+
+do_enable_feature_flag_when_ff_file_is_unwritable(Config) ->
+ FeatureName = quorum_queue,
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Node = ClusterSize - 1,
+ True = lists:duplicate(ClusterSize, true),
+ False = lists:duplicate(ClusterSize, false),
+ Files = feature_flags_files(Config),
+
+ %% The feature flag is supported but disabled initially.
+ ?assertEqual(
+ True,
+ is_feature_flag_supported(Config, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Restrict permissions on the `feature_flags` files.
+ [?assertEqual(ok, file:change_mode(File, 8#0444)) || File <- Files],
+
+ %% Enabling the feature flag works.
+ ?assertEqual(
+ ok,
+ enable_feature_flag_on(Config, Node, FeatureName)),
+ ?assertEqual(
+ True,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% The `feature_flags` files were not updated.
+ ?assertEqual(
+ lists:duplicate(ClusterSize, {ok, [[]]}),
+ [file:consult(File) || File <- feature_flags_files(Config)]),
+
+ %% Stop all nodes and restore permissions on the `feature_flags` files.
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ [?assertEqual(ok, rabbit_ct_broker_helpers:stop_node(Config, N))
+ || N <- Nodes],
+ [?assertEqual(ok, file:change_mode(File, 8#0644)) || File <- Files],
+
+ %% Restart all nodes and assert the feature flag is still enabled and
+ %% the `feature_flags` files were correctly repaired.
+ [?assertEqual(ok, rabbit_ct_broker_helpers:start_node(Config, N))
+ || N <- lists:reverse(Nodes)],
+
+ ?assertEqual(
+ True,
+ is_feature_flag_enabled(Config, FeatureName)),
+ ?assertEqual(
+ lists:duplicate(ClusterSize, {ok, [[FeatureName]]}),
+ [file:consult(File) || File <- feature_flags_files(Config)]).
+
+enable_feature_flag_with_a_network_partition(Config) ->
+ FeatureName = ff_from_testsuite,
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ [A, B, C, D, E] = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ True = lists:duplicate(ClusterSize, true),
+ False = lists:duplicate(ClusterSize, false),
+
+ %% The feature flag is supported but disabled initially.
+ ?assertEqual(
+ True,
+ is_feature_flag_supported(Config, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Isolate nodes B and E from the rest of the cluster.
+ NodePairs = [{B, A},
+ {B, C},
+ {B, D},
+ {E, A},
+ {E, C},
+ {E, D}],
+ block(NodePairs),
+ timer:sleep(1000),
+
+ %% Enabling the feature flag should fail in the specific case of
+ %% `ff_from_testsuite`, if the network is broken.
+ ?assertEqual(
+ {error, unsupported},
+ enable_feature_flag_on(Config, B, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Repair the network and try again to enable the feature flag.
+ unblock(NodePairs),
+ timer:sleep(10000),
+ [?assertEqual(ok, rabbit_ct_broker_helpers:stop_node(Config, N))
+ || N <- [A, C, D]],
+ [?assertEqual(ok, rabbit_ct_broker_helpers:start_node(Config, N))
+ || N <- [A, C, D]],
+ declare_arbitrary_feature_flag(Config),
+
+ %% Enabling the feature flag works.
+ ?assertEqual(
+ ok,
+ enable_feature_flag_on(Config, B, FeatureName)),
+ ?assertEqual(
+ True,
+ is_feature_flag_enabled(Config, FeatureName)).
+
+mark_feature_flag_as_enabled_with_a_network_partition(Config) ->
+ FeatureName = ff_from_testsuite,
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ [A, B, C, D, E] = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ True = lists:duplicate(ClusterSize, true),
+ False = lists:duplicate(ClusterSize, false),
+
+ %% The feature flag is supported but disabled initially.
+ ?assertEqual(
+ True,
+ is_feature_flag_supported(Config, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Isolate node B from the rest of the cluster.
+ NodePairs = [{B, A},
+ {B, C},
+ {B, D},
+ {B, E}],
+ block(NodePairs),
+ timer:sleep(1000),
+
+ %% Mark the feature flag as enabled on all nodes from node B. This
+ %% is expected to time out.
+ RemoteNodes = [A, C, D, E],
+ ?assertEqual(
+ {failed_to_mark_feature_flag_as_enabled_on_remote_nodes,
+ FeatureName,
+ true,
+ RemoteNodes},
+ rabbit_ct_broker_helpers:rpc(
+ Config, B,
+ rabbit_feature_flags, mark_as_enabled_remotely,
+ [RemoteNodes, FeatureName, true, 20000])),
+
+ RepairFun = fun() ->
+ %% Wait a few seconds before we repair the network.
+ timer:sleep(5000),
+
+ %% Repair the network and try again to enable
+ %% the feature flag.
+ unblock(NodePairs),
+ timer:sleep(1000)
+ end,
+ spawn(RepairFun),
+
+ %% Mark the feature flag as enabled on all nodes from node B. This
+ %% is expected to work this time.
+ ct:pal(?LOW_IMPORTANCE,
+ "Marking the feature flag as enabled on remote nodes...", []),
+ ?assertEqual(
+ ok,
+ rabbit_ct_broker_helpers:rpc(
+ Config, B,
+ rabbit_feature_flags, mark_as_enabled_remotely,
+ [RemoteNodes, FeatureName, true, 120000])).
+
+%% FIXME: Finish the testcase above ^
+
+clustering_ok_with_ff_disabled_everywhere(Config) ->
+ %% All feature flags are disabled. Clustering the two nodes should be
+ %% accepted because they are compatible.
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_supported(Config, ff_from_testsuite)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_supported(Config, ff_from_testsuite)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+ ok.
+
+clustering_ok_with_ff_enabled_on_some_nodes(Config) ->
+ %% The test feature flag is enabled on node 1, but not on node 2.
+ %% Clustering the two nodes should be accepted because they are
+ %% compatible. Also, the feature flag will be enabled on node 2 as a
+ %% consequence.
+ enable_feature_flag_on(Config, 0, ff_from_testsuite),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_supported(Config, ff_from_testsuite)),
+ ?assertEqual([true, false],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+ ok.
+
+clustering_ok_with_ff_enabled_everywhere(Config) ->
+ %% The test feature flag is enabled everywhere. Clustering the two nodes
+ %% should be accepted because they are compatible.
+ enable_feature_flag_everywhere(Config, ff_from_testsuite),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+ ok.
+
+clustering_ok_with_new_ff_disabled(Config) ->
+ %% We declare a new (fake) feature flag on node 1. Clustering the
+ %% two nodes should still be accepted because that feature flag is
+ %% disabled.
+ NewFeatureFlags = #{time_travel =>
+ #{desc => "Time travel with RabbitMQ",
+ provided_by => rabbit,
+ stability => stable}},
+ rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ rabbit_feature_flags, initialize_registry, [NewFeatureFlags]),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, false],
+ is_feature_flag_supported(Config, time_travel)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, time_travel));
+ false -> ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([false, false],
+ is_feature_flag_supported(Config, time_travel)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, time_travel));
+ false -> ok
+ end,
+ ok.
+
+clustering_denied_with_new_ff_enabled(Config) ->
+ %% We declare a new (fake) feature flag on node 1. Clustering the
+ %% two nodes should then be forbidden because node 2 is sure it does
+ %% not support it (because the `rabbit` application is loaded and
+ %% it does not have it).
+ NewFeatureFlags = #{time_travel =>
+ #{desc => "Time travel with RabbitMQ",
+ provided_by => rabbit,
+ stability => stable}},
+ rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ rabbit_feature_flags, initialize_registry, [NewFeatureFlags]),
+ enable_feature_flag_on(Config, 0, time_travel),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, false],
+ is_feature_flag_supported(Config, time_travel)),
+ ?assertEqual([true, false],
+ is_feature_flag_enabled(Config, time_travel));
+ false -> ok
+ end,
+
+ ?assertMatch({skip, _}, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, false],
+ is_feature_flag_supported(Config, time_travel)),
+ ?assertEqual([true, false],
+ is_feature_flag_enabled(Config, time_travel));
+ false -> ok
+ end,
+ ok.
+
+clustering_ok_with_new_ff_disabled_from_plugin_on_some_nodes(Config) ->
+ %% We first enable the test plugin on node 1, then we try to cluster
+ %% them. Even though the two nodes do not share the same set of feature
+ %% flags (the test plugin exposes one), they should be considered
+ %% compatible and the clustering should be allowed.
+ rabbit_ct_broker_helpers:enable_plugin(Config, 0, "my_plugin"),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, false],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, true],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+ ok.
+
+clustering_ok_with_new_ff_enabled_from_plugin_on_some_nodes(Config) ->
+ %% We first enable the test plugin on node 1 and enable its feature
+ %% flag, then we try to cluster them. Even though the two nodes do not
+ %% share the same set of feature flags (the test plugin exposes one), they
+ %% should be considered compatible and the clustering should be
+ %% allowed.
+ rabbit_ct_broker_helpers:enable_plugin(Config, 0, "my_plugin"),
+ enable_feature_flag_on(Config, 0, plugin_ff),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, false],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([true, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, true],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([true, true],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+ ok.
+
+activating_plugin_with_new_ff_disabled(Config) ->
+ %% Both nodes are clustered. A new plugin is enabled on node 1
+ %% and this plugin has a new feature flag node 2 does not know about.
+ %% Enabling the plugin is allowed because the nodes remain compatible:
+ %% the plugin is missing on one node, so its feature flag cannot conflict.
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([false, false],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+
+ rabbit_ct_broker_helpers:enable_plugin(Config, 0, "my_plugin"),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, true],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+ ok.
+
+activating_plugin_with_new_ff_enabled(Config) ->
+ %% Both nodes are clustered. A new plugin is enabled on node 1
+ %% and this plugin has a new feature flag node 2 does not know about.
+ %% Enabling the plugin is allowed because the nodes remain compatible:
+ %% the plugin is missing on one node, so its feature flag cannot conflict.
+ %% Enabling the plugin's feature flag is also permitted for this
+ %% same reason.
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([false, false],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+
+ rabbit_ct_broker_helpers:enable_plugin(Config, 0, "my_plugin"),
+ enable_feature_flag_on(Config, 0, plugin_ff),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, true],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([true, true],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+ ok.
+
+%% -------------------------------------------------------------------
+%% Internal helpers.
+%% -------------------------------------------------------------------
+
+build_my_plugin(Config) ->
+ PluginSrcDir = filename:join(?config(data_dir, Config), "my_plugin"),
+ PluginsDir = filename:join(PluginSrcDir, "plugins"),
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_plugins_dir, PluginsDir}]),
+ {MyPlugin, OtherPlugins} = list_my_plugin_plugins(PluginSrcDir),
+ case MyPlugin of
+ [] ->
+ DepsDir = ?config(erlang_mk_depsdir, Config),
+ Args = ["test-dist",
+ {"DEPS_DIR=~s", [DepsDir]},
+ %% We clear ALL_DEPS_DIRS to make sure they are
+ %% not recompiled when the plugin is built. `rabbit`
+ %% was previously compiled with -DTEST and if it is
+ %% recompiled because of this plugin, it will be
+ %% recompiled without -DTEST: the testsuite depends
+ %% on test code so we can't allow that.
+ %%
+ %% Note that we do not clear the DEPS variable:
+ %% we need it to be correct because it is used to
+ %% generate `my_plugin.app` (and a RabbitMQ plugin
+ %% must depend on `rabbit`).
+ "ALL_DEPS_DIRS="],
+ case rabbit_ct_helpers:make(Config1, PluginSrcDir, Args) of
+ {ok, _} ->
+ {_, OtherPlugins1} = list_my_plugin_plugins(PluginSrcDir),
+ remove_other_plugins(PluginSrcDir, OtherPlugins1),
+ update_cli_path(Config1, PluginSrcDir);
+ {error, _} ->
+ {skip, "Failed to compile the `my_plugin` test plugin"}
+ end;
+ _ ->
+ remove_other_plugins(PluginSrcDir, OtherPlugins),
+ update_cli_path(Config1, PluginSrcDir)
+ end.
+
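+%% If the plugin build produced its own CLI scripts under sbin/, point the
+%% broker helpers at them so that later rabbitmqctl, rabbitmq-plugins and
+%% rabbitmq-queues invocations presumably use a CLI that knows about
+%% `my_plugin`.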
+update_cli_path(Config, PluginSrcDir) ->
+ SbinDir = filename:join(PluginSrcDir, "sbin"),
+ Rabbitmqctl = filename:join(SbinDir, "rabbitmqctl"),
+ RabbitmqPlugins = filename:join(SbinDir, "rabbitmq-plugins"),
+ RabbitmqQueues = filename:join(SbinDir, "rabbitmq-queues"),
+ case filelib:is_regular(Rabbitmqctl) of
+ true ->
+ ct:pal(?LOW_IMPORTANCE,
+ "Switching to CLI in e.g. ~s", [Rabbitmqctl]),
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{rabbitmqctl_cmd, Rabbitmqctl},
+ {rabbitmq_plugins_cmd, RabbitmqPlugins},
+ {rabbitmq_queues_cmd, RabbitmqQueues}]);
+ false ->
+ Config
+ end.
+
+list_my_plugin_plugins(PluginSrcDir) ->
+ Files = filelib:wildcard("plugins/*", PluginSrcDir),
+ lists:partition(
+ fun(Path) ->
+ Filename = filename:basename(Path),
+ re:run(Filename, "^my_plugin-", [{capture, none}]) =:= match
+ end, Files).
+
+remove_other_plugins(PluginSrcDir, OtherPlugins) ->
+ ok = rabbit_file:recursive_delete(
+ [filename:join(PluginSrcDir, OtherPlugin)
+ || OtherPlugin <- OtherPlugins]).
+
+work_around_cli_and_rabbit_circular_dep(Config) ->
+ %% FIXME: We also need to copy `rabbit` into `my_plugin`'s plugins
+ %% directory, not because `my_plugin` depends on it, but because the
+ %% CLI erroneously depends on the broker.
+ %%
+ %% This can't be fixed easily because this is a circular dependency
+ %% (i.e. the broker depends on the CLI). So until a proper solution
+ %% is implemented, keep this second copy of the broker for the CLI
+ %% to find it.
+ InitialPluginsDir = filename:join(
+ ?config(current_srcdir, Config),
+ "plugins"),
+ PluginsDir = ?config(rmq_plugins_dir, Config),
+ lists:foreach(
+ fun(Path) ->
+ Filename = filename:basename(Path),
+ IsRabbit = re:run(
+ Filename,
+ "^rabbit-", [{capture, none}]) =:= match,
+ case IsRabbit of
+ true ->
+ Dest = filename:join(PluginsDir, Filename),
+ ct:pal(
+ ?LOW_IMPORTANCE,
+ "Copy `~s` to `~s` to fix CLI erroneous "
+ "dependency on `rabbit`", [Path, Dest]),
+ ok = rabbit_file:recursive_copy(Path, Dest);
+ false ->
+ ok
+ end
+ end,
+ filelib:wildcard(filename:join(InitialPluginsDir, "*"))),
+ Config.
+
+enable_feature_flag_on(Config, Node, FeatureName) ->
+ rabbit_ct_broker_helpers:rpc(
+ Config, Node, rabbit_feature_flags, enable, [FeatureName]).
+
+enable_feature_flag_everywhere(Config, FeatureName) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_feature_flags, enable, [FeatureName]).
+
+is_feature_flag_supported(Config, FeatureName) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_feature_flags, is_supported, [FeatureName]).
+
+is_feature_flag_enabled(Config, FeatureName) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_feature_flags, is_enabled, [FeatureName]).
+
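+%% The subsystem counts as available only if rabbit_feature_flags:list/0 is
+%% exported on every node; presumably an older broker in a mixed-version
+%% cluster would not export it, in which case the testcases fall back to
+%% their `FFSubsysOk = false` branches.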
+is_feature_flag_subsystem_available(Config) ->
+ lists:all(
+ fun(B) -> B end,
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, erlang, function_exported, [rabbit_feature_flags, list, 0])).
+
+feature_flags_files(Config) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_feature_flags, enabled_feature_flags_list_file, []).
+
+log_feature_flags_of_all_nodes(Config) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_feature_flags, info, [#{color => false,
+ lines => false}]).
+
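+%% Wrap a feature flag map in the {Application, Module, FeatureFlagList}
+%% shape passed to rabbit_feature_flags:inject_test_feature_flags/1, with
+%% this testsuite module standing in for both. For example,
+%% feature_flags_to_app_attrs(#{ff_a => #{desc => "A"}}) yields
+%% [{feature_flags_SUITE, feature_flags_SUITE, [{ff_a, #{desc => "A"}}]}].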
+feature_flags_to_app_attrs(FeatureFlags) when is_map(FeatureFlags) ->
+ [{?MODULE, % Application
+ ?MODULE, % Module
+ maps:to_list(FeatureFlags)}].
+
+declare_arbitrary_feature_flag(Config) ->
+ FeatureFlags = #{ff_from_testsuite =>
+ #{desc => "My feature flag",
+ stability => stable}},
+ rabbit_ct_broker_helpers:rpc_all(
+ Config,
+ rabbit_feature_flags,
+ inject_test_feature_flags,
+ [feature_flags_to_app_attrs(FeatureFlags)]),
+ ok.
+
+block(Pairs) -> [block(X, Y) || {X, Y} <- Pairs].
+unblock(Pairs) -> [allow(X, Y) || {X, Y} <- Pairs].
+
+block(X, Y) ->
+ rabbit_ct_broker_helpers:block_traffic_between(X, Y).
+
+allow(X, Y) ->
+ rabbit_ct_broker_helpers:allow_traffic_between(X, Y).
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore
new file mode 100644
index 0000000000..f6d56e0687
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore
@@ -0,0 +1,7 @@
+/.erlang.mk/
+/deps/
+/ebin/
+/escript
+/plugins/
+/my_plugin.d
+/sbin
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/Makefile b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/Makefile
new file mode 100644
index 0000000000..8f6681090b
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/Makefile
@@ -0,0 +1,15 @@
+PROJECT = my_plugin
+PROJECT_DESCRIPTION = Plugin to test feature flags
+PROJECT_VERSION = 1.0.0
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/erlang.mk b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/erlang.mk
new file mode 100644
index 0000000000..f303054bad
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/erlang.mk
@@ -0,0 +1 @@
+include ../../../erlang.mk
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/rabbitmq-components.mk b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/rabbitmq-components.mk
new file mode 100644
index 0000000000..9f89dba726
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/rabbitmq-components.mk
@@ -0,0 +1 @@
+include ../../../rabbitmq-components.mk
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl
new file mode 100644
index 0000000000..687acdb5de
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl
@@ -0,0 +1,10 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(my_plugin).
+
+-rabbit_feature_flag({plugin_ff, #{desc => "Plugin's feature flag A"}}).
diff --git a/deps/rabbit/test/honeycomb_cth.erl b/deps/rabbit/test/honeycomb_cth.erl
new file mode 100644
index 0000000000..dd82da13c7
--- /dev/null
+++ b/deps/rabbit/test/honeycomb_cth.erl
@@ -0,0 +1,105 @@
+-module(honeycomb_cth).
+
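+%% Common Test hook that writes one JSON document per testcase (duration,
+%% result, CI and host metadata), presumably for ingestion into Honeycomb.
+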
+-export([id/1]).
+-export([init/2]).
+
+-export([pre_init_per_testcase/4]).
+-export([post_end_per_testcase/5]).
+
+-record(state, {directory, github_workflow, github_run_id,
+ github_repository, github_sha, github_ref,
+ base_rmq_ref, secondary_umbrella,
+ erlang_version, elixir_version,
+ otp_release, cpu_topology, schedulers,
+ system_architecture, system_memory_data,
+ start_times = #{}}).
+
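+%% The hook id doubles as the directory the per-testcase JSON files are
+%% written to (see filename/3 below); it defaults to "/tmp/honeycomb".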
+id(Opts) ->
+ proplists:get_value(directory, Opts, "/tmp/honeycomb").
+
+init(Id, _Opts) ->
+ application:ensure_all_started(os_mon),
+ {ok, #state{directory = Id,
+ github_workflow = os:getenv("GITHUB_WORKFLOW", "unknown"),
+ github_run_id = os:getenv("GITHUB_RUN_ID", "unknown"),
+ github_repository = os:getenv("GITHUB_REPOSITORY", "unknown"),
+ github_sha = os:getenv("GITHUB_SHA", "unknown"),
+ github_ref = os:getenv("GITHUB_REF", "unknown"),
+ base_rmq_ref = os:getenv("BASE_RMQ_REF", "unknown"),
+ secondary_umbrella = os:getenv("SECONDARY_UMBRELLA", "none"),
+ erlang_version = os:getenv("ERLANG_VERSION", "unknown"),
+ elixir_version = os:getenv("ELIXIR_VERSION", "unknown"),
+ otp_release = erlang:system_info(otp_release),
+ cpu_topology = erlang:system_info(cpu_topology),
+ schedulers = erlang:system_info(schedulers),
+ system_architecture = erlang:system_info(system_architecture),
+ system_memory_data = memsup:get_system_memory_data()}}.
+
+pre_init_per_testcase(Suite, TC, Config, #state{start_times = StartTimes} = State) ->
+ SuiteTimes = maps:get(Suite, StartTimes, #{}),
+ {Config, State#state{start_times =
+ StartTimes#{Suite =>
+ SuiteTimes#{TC => erlang:timestamp()}}}}.
+
+post_end_per_testcase(Suite, TC, _Config, Return, #state{github_workflow = GithubWorkflow,
+ github_run_id = GithubRunId,
+ github_repository = GithubRepository,
+ github_sha = GithubSha,
+ github_ref = GithubRef,
+ base_rmq_ref = BaseRmqRef,
+ secondary_umbrella = SecondaryUmbrella,
+ erlang_version = ErlangVersion,
+ elixir_version = ElixirVersion,
+ otp_release = OtpRelease,
+ cpu_topology = CpuTopology,
+ schedulers = Schedulers,
+ system_architecture = SystemArchitecture,
+ system_memory_data = SystemMemoryData,
+ start_times = StartTimes} = State) ->
+ EndTime = erlang:timestamp(),
+ SuiteTimes = maps:get(Suite, StartTimes),
+ {StartTime, SuiteTimes1} = maps:take(TC, SuiteTimes),
+ DurationMicroseconds = timer:now_diff(EndTime, StartTime),
+
+ File = filename(Suite, TC, State),
+ ok = filelib:ensure_dir(File),
+ {ok, F} = file:open(File, [write]),
+
+ Json = jsx:encode([{<<"ci">>, <<"GitHub Actions">>},
+ {<<"github_workflow">>, list_to_binary(GithubWorkflow)},
+ {<<"github_run_id">>, list_to_binary(GithubRunId)},
+ {<<"github_repository">>, list_to_binary(GithubRepository)},
+ {<<"github_sha">>, list_to_binary(GithubSha)},
+ {<<"github_ref">>, list_to_binary(GithubRef)},
+ {<<"base_rmq_ref">>, list_to_binary(BaseRmqRef)},
+ {<<"secondary_umbrella">>, list_to_binary(SecondaryUmbrella)},
+ {<<"erlang_version">>, list_to_binary(ErlangVersion)},
+ {<<"elixir_version">>, list_to_binary(ElixirVersion)},
+ {<<"otp_release">>, list_to_binary(OtpRelease)},
+ {<<"cpu_topology">>, cpu_topology_json_term(CpuTopology)},
+ {<<"schedulers">>, Schedulers},
+ {<<"system_architecture">>, list_to_binary(SystemArchitecture)},
+ {<<"system_memory_data">>, memory_json_term(SystemMemoryData)},
+ {<<"suite">>, list_to_binary(atom_to_list(Suite))},
+ {<<"testcase">>, list_to_binary(atom_to_list(TC))},
+ {<<"duration_seconds">>, DurationMicroseconds / 1000000},
+ {<<"result">>, list_to_binary(io_lib:format("~p", [Return]))}]),
+
+ file:write(F, Json),
+ file:close(F),
+ {Return, State#state{start_times = StartTimes#{Suite := SuiteTimes1}}}.
+
+filename(Suite, TC, #state{directory = Dir}) ->
+ filename:join(Dir,
+ integer_to_list(erlang:system_time())
+ ++ "_" ++ atom_to_list(Suite)
+ ++ "_" ++ atom_to_list(TC)
+ ++ ".json").
+
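+%% Convert the memsup proplist (atom keys) into binary keys so that jsx
+%% can encode it as a JSON object.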
+memory_json_term(SystemMemoryData) when is_list(SystemMemoryData) ->
+ [{list_to_binary(atom_to_list(K)), V} || {K, V} <- SystemMemoryData].
+
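+%% Only the single-processor topology shape is handled here; for example
+%% [{processor, [{core, {logical, 0}}]}] becomes
+%% [{<<"processor">>, [[{<<"core">>, [{<<"logical">>, 0}]}]]}].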
+cpu_topology_json_term([{processor, Cores}]) when is_list(Cores) ->
+ [{<<"processor">>, [begin
+ [{<<"core">>, [{list_to_binary(atom_to_list(Kind)), Index}]}]
+ end || {core, {Kind, Index}} <- Cores]}].
diff --git a/deps/rabbit/test/lazy_queue_SUITE.erl b/deps/rabbit/test/lazy_queue_SUITE.erl
new file mode 100644
index 0000000000..8748b07aca
--- /dev/null
+++ b/deps/rabbit/test/lazy_queue_SUITE.erl
@@ -0,0 +1,215 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(lazy_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"queue.mode.test">>).
+-define(MESSAGE_COUNT, 2000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ declare_args,
+ queue_mode_policy,
+ publish_consume
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 2,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+declare_args(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ LQ = <<"lazy-q">>,
+ declare(Ch, LQ, [{<<"x-queue-mode">>, longstr, <<"lazy">>}]),
+ assert_queue_mode(A, LQ, lazy),
+
+ DQ = <<"default-q">>,
+ declare(Ch, DQ, [{<<"x-queue-mode">>, longstr, <<"default">>}]),
+ assert_queue_mode(A, DQ, default),
+
+ DQ2 = <<"default-q2">>,
+ declare(Ch, DQ2),
+ assert_queue_mode(A, DQ2, default),
+
+ passed.
+
+queue_mode_policy(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ LQ = <<"lazy-q">>,
+ declare(Ch, LQ, [{<<"x-queue-mode">>, longstr, <<"lazy">>}]),
+ assert_queue_mode(A, LQ, lazy),
+
+ LQ2 = <<"lazy-q-2">>,
+ declare(Ch, LQ2),
+ assert_queue_mode(A, LQ2, lazy),
+
+ DQ = <<"default-q">>,
+ declare(Ch, DQ, [{<<"x-queue-mode">>, longstr, <<"default">>}]),
+ assert_queue_mode(A, DQ, default),
+
+ set_ha_mode_policy(Config, A, <<"default">>),
+
+ ok = wait_for_queue_mode(A, LQ, lazy, 5000),
+ ok = wait_for_queue_mode(A, LQ2, default, 5000),
+ ok = wait_for_queue_mode(A, DQ, default, 5000),
+
+ passed.
+
+publish_consume(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ declare(Ch, ?QNAME),
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ consume(Ch, ?QNAME, ack),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ set_ha_mode_policy(Config, A, <<"default">>),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ set_ha_mode_policy(Config, A, <<"default">>),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ cancel(Ch),
+
+ passed.
+
+%%----------------------------------------------------------------------------
+
+declare(Ch, Q) ->
+ declare(Ch, Q, []).
+
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ arguments = Args}).
+
+consume(Ch, Q, Ack) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = Ack =:= no_ack,
+ consumer_tag = <<"ctag">>},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+cancel(Ch) ->
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}).
+
+assert_delivered(Ch, Ack, Payload) ->
+ PBin = payload2bin(Payload),
+ receive
+ {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} ->
+ PBin = PBin2,
+ maybe_ack(Ch, Ack, DTag)
+ end.
+
+maybe_ack(Ch, do_ack, DTag) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+ DTag;
+maybe_ack(_Ch, _, DTag) ->
+ DTag.
+
+payload2bin(Int) -> list_to_binary(integer_to_list(Int)).
+
+set_ha_mode_policy(Config, Node, Mode) ->
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>, <<"all">>,
+ [{<<"queue-mode">>, Mode}]).
+
+
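+%% Poll the backing queue status every 100ms until the queue reports the
+%% expected mode, giving up (and returning `fail`) once the Max budget,
+%% expressed in milliseconds, is exhausted.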
+wait_for_queue_mode(_Node, _Q, _Mode, Max) when Max < 0 ->
+ fail;
+wait_for_queue_mode(Node, Q, Mode, Max) ->
+ case get_queue_mode(Node, Q) of
+ Mode -> ok;
+ _ -> timer:sleep(100),
+ wait_for_queue_mode(Node, Q, Mode, Max - 100)
+ end.
+
+assert_queue_mode(Node, Q, Expected) ->
+ Actual = get_queue_mode(Node, Q),
+ Expected = Actual.
+
+get_queue_mode(Node, Q) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, Q),
+ {ok, AMQQueue} =
+ rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]),
+ [{backing_queue_status, Status}] =
+ rpc:call(Node, rabbit_amqqueue, info,
+ [AMQQueue, [backing_queue_status]]),
+ proplists:get_value(mode, Status).
diff --git a/deps/rabbit/test/list_consumers_sanity_check_SUITE.erl b/deps/rabbit/test/list_consumers_sanity_check_SUITE.erl
new file mode 100644
index 0000000000..fbd31fa3e8
--- /dev/null
+++ b/deps/rabbit/test/list_consumers_sanity_check_SUITE.erl
@@ -0,0 +1,125 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(list_consumers_sanity_check_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, list_consumers_sanity_check}
+ ].
+
+groups() ->
+ [
+ {list_consumers_sanity_check, [], [
+ list_consumers_sanity_check
+ ]}
+ ].
+
+group(_) ->
+ [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcase
+%% -------------------------------------------------------------------
+
+list_consumers_sanity_check(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Chan = rabbit_ct_client_helpers:open_channel(Config, A),
+ %% this queue is not cleaned up because the entire node is
+ %% reset between tests
+ QName = <<"list_consumers_q">>,
+ #'queue.declare_ok'{} = amqp_channel:call(Chan, #'queue.declare'{queue = QName}),
+
+ %% No consumers even if we have some queues
+ [] = rabbitmqctl_list_consumers(Config, A),
+
+ %% Several consumers on single channel should be correctly reported
+ #'basic.consume_ok'{consumer_tag = CTag1} = amqp_channel:call(Chan, #'basic.consume'{queue = QName}),
+ #'basic.consume_ok'{consumer_tag = CTag2} = amqp_channel:call(Chan, #'basic.consume'{queue = QName}),
+ true = (lists:sort([CTag1, CTag2]) =:=
+ lists:sort(rabbitmqctl_list_consumers(Config, A))),
+
+ %% `rabbitmqctl report` shares some code with `list_consumers`, so
+ %% check that it also reports both channels
+ {ok, ReportStdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A,
+ ["list_consumers", "--no-table-headers"]),
+ ReportLines = re:split(ReportStdOut, <<"\n">>, [trim]),
+ ReportCTags = [lists:nth(3, re:split(Row, <<"\t">>)) || <<"list_consumers_q", _/binary>> = Row <- ReportLines],
+ true = (lists:sort([CTag1, CTag2]) =:=
+ lists:sort(ReportCTags)).
+
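+%% Run `rabbitmqctl list_consumers` and return the consumer tag (third
+%% tab-separated column) of every listed consumer.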
+rabbitmqctl_list_consumers(Config, Node) ->
+ {ok, StdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Node,
+ ["list_consumers", "--no-table-headers"]),
+ [<<"Listing consumers", _/binary>> | ConsumerRows] = re:split(StdOut, <<"\n">>, [trim]),
+ CTags = [ lists:nth(3, re:split(Row, <<"\t">>)) || Row <- ConsumerRows ],
+ CTags.
+
+list_queues_online_and_offline(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ %% Node B will be stopped
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_1">>, durable = true}),
+ #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_2">>, durable = true}),
+ #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_1">>, durable = true}),
+ #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_2">>, durable = true}),
+
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, B, ["stop"]),
+
+ GotUp = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+ ["list_queues", "--online", "name", "--no-table-headers"])),
+ ExpectUp = [[<<"q_a_1">>], [<<"q_a_2">>]],
+ ExpectUp = GotUp,
+
+ GotDown = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+ ["list_queues", "--offline", "name", "--no-table-headers"])),
+ ExpectDown = [[<<"q_b_1">>], [<<"q_b_2">>]],
+ ExpectDown = GotDown,
+
+ GotAll = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+ ["list_queues", "name", "--no-table-headers"])),
+ ExpectAll = ExpectUp ++ ExpectDown,
+ ExpectAll = GotAll,
+
+ ok.
diff --git a/deps/rabbit/test/list_queues_online_and_offline_SUITE.erl b/deps/rabbit/test/list_queues_online_and_offline_SUITE.erl
new file mode 100644
index 0000000000..d26fdc03e2
--- /dev/null
+++ b/deps/rabbit/test/list_queues_online_and_offline_SUITE.erl
@@ -0,0 +1,99 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(list_queues_online_and_offline_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, list_queues_online_and_offline}
+ ].
+
+groups() ->
+ [
+ {list_queues_online_and_offline, [], [
+ list_queues_online_and_offline %% Stop node B.
+ ]}
+ ].
+
+group(_) ->
+ [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% ---------------------------------------------------------------------------
+%% Testcase
+%% ---------------------------------------------------------------------------
+
+list_queues_online_and_offline(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ %% Node B will be stopped
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_1">>, durable = true}),
+ #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_2">>, durable = true}),
+ #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_1">>, durable = true}),
+ #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_2">>, durable = true}),
+
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, B, ["stop"]),
+
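+    %% wait until node A no longer sees node B among the running cluster
+    %% members before listing online/offline queues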
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ [A] == rpc:call(A, rabbit_mnesia, cluster_nodes, [running])
+ end, 60000),
+
+ GotUp = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+ ["list_queues", "--online", "name", "--no-table-headers"])),
+ ExpectUp = [[<<"q_a_1">>], [<<"q_a_2">>]],
+ ExpectUp = GotUp,
+
+ GotDown = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+ ["list_queues", "--offline", "name", "--no-table-headers"])),
+ ExpectDown = [[<<"q_b_1">>], [<<"q_b_2">>]],
+ ExpectDown = GotDown,
+
+ GotAll = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+ ["list_queues", "name", "--no-table-headers"])),
+ ExpectAll = ExpectUp ++ ExpectDown,
+ ExpectAll = GotAll,
+
+ ok.
diff --git a/deps/rabbit/test/maintenance_mode_SUITE.erl b/deps/rabbit/test/maintenance_mode_SUITE.erl
new file mode 100644
index 0000000000..3abbf9b064
--- /dev/null
+++ b/deps/rabbit/test/maintenance_mode_SUITE.erl
@@ -0,0 +1,284 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(maintenance_mode_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_3},
+ {group, quorum_queues}
+ ].
+
+groups() ->
+ [
+ {cluster_size_3, [], [
+ maintenance_mode_status,
+ listener_suspension_status,
+ client_connection_closure,
+ classic_mirrored_queue_leadership_transfer
+ ]},
+ {quorum_queues, [], [
+ quorum_queue_leadership_transfer
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Setup and teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_Group, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(quorum_queue_leadership_transfer = Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ MaintenanceModeFFEnabled = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, maintenance_mode_status),
+ QuorumQueueFFEnabled = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, quorum_queue),
+ case MaintenanceModeFFEnabled of
+ ok ->
+ case QuorumQueueFFEnabled of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_testcase(Testcase, Config2),
+ Skip
+ end;
+ Skip ->
+ end_per_testcase(Testcase, Config2),
+ Skip
+ end;
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++
+ [fun rabbit_ct_broker_helpers:set_ha_policy_all/1]),
+ MaintenanceModeFFEnabled = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2,
+ maintenance_mode_status),
+ case MaintenanceModeFFEnabled of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_testcase(Testcase, Config2),
+ Skip
+ end.
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+maintenance_mode_status(Config) ->
+ Nodes = [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ [begin
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_local_read(Config, Node)),
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_consistent_read(Config, Node))
+ end || Node <- Nodes],
+
+ [begin
+ [begin
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_consistent_read(Config, TargetNode, NodeToCheck))
+ end || NodeToCheck <- Nodes]
+ end || TargetNode <- Nodes],
+
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, B),
+ rabbit_ct_helpers:await_condition(
+ fun () -> rabbit_ct_broker_helpers:is_being_drained_local_read(Config, B) end,
+ 10000),
+
+ [begin
+ ?assert(rabbit_ct_broker_helpers:is_being_drained_consistent_read(Config, TargetNode, B))
+ end || TargetNode <- Nodes],
+
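+    %% a node that is being drained must not be a candidate for its own
+    %% primary replica transfers; only the other cluster members qualify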
+ ?assertEqual(
+ lists:usort([A, C]),
+ lists:usort(rabbit_ct_broker_helpers:rpc(Config, B,
+ rabbit_maintenance, primary_replica_transfer_candidate_nodes, []))),
+
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, B),
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, B) end,
+ 10000),
+
+ [begin
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_local_read(Config, TargetNode, B)),
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_consistent_read(Config, TargetNode, B))
+ end || TargetNode <- Nodes],
+
+ ?assertEqual(
+ lists:usort([A, C]),
+ lists:usort(rabbit_ct_broker_helpers:rpc(Config, B,
+ rabbit_maintenance, primary_replica_transfer_candidate_nodes, []))),
+
+ ok.
+
+
+listener_suspension_status(Config) ->
+ Nodes = [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ct:pal("Picked node ~s for maintenance tests...", [A]),
+
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ [begin
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_consistent_read(Config, Node))
+ end || Node <- Nodes],
+
+ Conn1 = rabbit_ct_client_helpers:open_connection(Config, A),
+ ?assert(is_pid(Conn1)),
+ rabbit_ct_client_helpers:close_connection(Conn1),
+
+ rabbit_ct_broker_helpers:drain_node(Config, A),
+ rabbit_ct_helpers:await_condition(
+ fun () -> rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ ?assertEqual({error, econnrefused}, rabbit_ct_client_helpers:open_unmanaged_connection(Config, A)),
+
+ rabbit_ct_broker_helpers:revive_node(Config, A),
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ Conn3 = rabbit_ct_client_helpers:open_connection(Config, A),
+ ?assert(is_pid(Conn3)),
+ rabbit_ct_client_helpers:close_connection(Conn3),
+
+ ok.
+
+
+client_connection_closure(Config) ->
+ [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ct:pal("Picked node ~s for maintenance tests...", [A]),
+
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ Conn1 = rabbit_ct_client_helpers:open_connection(Config, A),
+ ?assert(is_pid(Conn1)),
+ ?assertEqual(1, length(rabbit_ct_broker_helpers:rpc(Config, A, rabbit_networking, local_connections, []))),
+
+ rabbit_ct_broker_helpers:drain_node(Config, A),
+ ?assertEqual(0, length(rabbit_ct_broker_helpers:rpc(Config, A, rabbit_networking, local_connections, []))),
+
+ rabbit_ct_broker_helpers:revive_node(Config, A).
+
+
+classic_mirrored_queue_leadership_transfer(Config) ->
+ [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ct:pal("Picked node ~s for maintenance tests...", [A]),
+
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ PolicyPattern = <<"^cq.mirrored">>,
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, PolicyPattern, <<"all">>),
+
+ Conn = rabbit_ct_client_helpers:open_connection(Config, A),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QName = <<"cq.mirrored.1">>,
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName, durable = true}),
+
+ ?assertEqual(1, length(rabbit_ct_broker_helpers:rpc(Config, A, rabbit_amqqueue, list_local, [<<"/">>]))),
+
+ rabbit_ct_broker_helpers:drain_node(Config, A),
+ rabbit_ct_helpers:await_condition(
+ fun () -> rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ ?assertEqual(0, length(rabbit_ct_broker_helpers:rpc(Config, A, rabbit_amqqueue, list_local, [<<"/">>]))),
+
+ rabbit_ct_broker_helpers:revive_node(Config, A),
+    %% rabbit_ct_broker_helpers:set_ha_policy/4 uses the pattern as the policy name
+ rabbit_ct_broker_helpers:clear_policy(Config, A, PolicyPattern).
+
+quorum_queue_leadership_transfer(Config) ->
+ [A | _] = Nodenames = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ ct:pal("Picked node ~s for maintenance tests...", [A]),
+
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ Conn = rabbit_ct_client_helpers:open_connection(Config, A),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QName = <<"qq.1">>,
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName, durable = true, arguments = [
+ {<<"x-queue-type">>, longstr, <<"quorum">>}
+ ]}),
+
+ %% we cannot assert on the number of local leaders here: declaring a QQ on node A
+ %% does not guarantee that the leader will be hosted on node A
+
+ rabbit_ct_broker_helpers:drain_node(Config, A),
+ rabbit_ct_helpers:await_condition(
+ fun () -> rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ %% quorum queue leader election is asynchronous
+ AllTheSame = quorum_queue_utils:fifo_machines_use_same_version(
+ Config, Nodenames),
+ case AllTheSame of
+ true ->
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ LocalLeaders = rabbit_ct_broker_helpers:rpc(
+ Config, A,
+ rabbit_amqqueue,
+ list_local_leaders,
+ []),
+ length(LocalLeaders) =:= 0
+ end, 20000);
+ false ->
+ ct:pal(
+ ?LOW_IMPORTANCE,
+ "Skip leader election check because rabbit_fifo machines "
+ "have different versions", [])
+ end,
+
+ rabbit_ct_broker_helpers:revive_node(Config, A).
diff --git a/deps/rabbit/test/many_node_ha_SUITE.erl b/deps/rabbit/test/many_node_ha_SUITE.erl
new file mode 100644
index 0000000000..ece7dc8830
--- /dev/null
+++ b/deps/rabbit/test/many_node_ha_SUITE.erl
@@ -0,0 +1,112 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(many_node_ha_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+suite() ->
+ [
+ {timetrap, {minutes, 5}}
+ ].
+
+all() ->
+ [
+ {group, cluster_size_6}
+ ].
+
+groups() ->
+ [
+ {cluster_size_6, [], [
+ kill_intermediate
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_6, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 6}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+kill_intermediate(Config) ->
+ [A, B, C, D, E, F] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ MasterChannel = rabbit_ct_client_helpers:open_channel(Config, A),
+ ConsumerChannel = rabbit_ct_client_helpers:open_channel(Config, E),
+ ProducerChannel = rabbit_ct_client_helpers:open_channel(Config, F),
+ Queue = <<"test">>,
+ amqp_channel:call(MasterChannel, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+    %% TODO: this seems *highly* timing dependent - the assumption being
+ %% that the kill will work quickly enough that there will still be
+ %% some messages in-flight that we *must* receive despite the intervening
+ %% node deaths. It would be nice if we could find a means to do this
+ %% in a way that is not actually timing dependent.
+
+ %% Worse still, it assumes that killing the master will cause a
+ %% failover to Slave1, and so on. Nope.
+
+ ConsumerPid = rabbit_ha_test_consumer:create(ConsumerChannel,
+ Queue, self(), false, Msgs),
+
+ ProducerPid = rabbit_ha_test_producer:create(ProducerChannel,
+ Queue, self(), false, Msgs),
+
+ %% create a killer for the master and the first 3 mirrors
+ [rabbit_ct_broker_helpers:kill_node_after(Config, Node, Time) ||
+ {Node, Time} <- [{A, 50},
+ {B, 50},
+ {C, 100},
+ {D, 100}]],
+
+ %% verify that the consumer got all msgs, or die, or time out
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ rabbit_ha_test_consumer:await_response(ConsumerPid),
+ ok.
diff --git a/deps/rabbit/test/message_size_limit_SUITE.erl b/deps/rabbit/test/message_size_limit_SUITE.erl
new file mode 100644
index 0000000000..f43a582c85
--- /dev/null
+++ b/deps/rabbit/test/message_size_limit_SUITE.erl
@@ -0,0 +1,145 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(message_size_limit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT_LIST_OPS_PASS, 5000).
+-define(TIMEOUT, 30000).
+-define(TIMEOUT_CHANNEL_EXCEPTION, 5000).
+
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ max_message_size
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 3}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+max_message_size(Config) ->
+ Binary2M = gen_binary_mb(2),
+ Binary4M = gen_binary_mb(4),
+ Binary6M = gen_binary_mb(6),
+ Binary10M = gen_binary_mb(10),
+
+ Size2Mb = 1024 * 1024 * 2,
+ Size2Mb = byte_size(Binary2M),
+
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, max_message_size, 1024 * 1024 * 3]),
+
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+
+ %% Binary is within the max size limit
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = <<"none">>}, #amqp_msg{payload = Binary2M}),
+ %% The channel process is alive
+ assert_channel_alive(Ch),
+
+ Monitor = monitor(process, Ch),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = <<"none">>}, #amqp_msg{payload = Binary4M}),
+ assert_channel_fail_max_size(Ch, Monitor),
+
+ %% increase the limit
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, max_message_size, 1024 * 1024 * 8]),
+
+ {_, Ch1} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+
+ amqp_channel:call(Ch1, #'basic.publish'{routing_key = <<"nope">>}, #amqp_msg{payload = Binary2M}),
+ assert_channel_alive(Ch1),
+
+ amqp_channel:call(Ch1, #'basic.publish'{routing_key = <<"nope">>}, #amqp_msg{payload = Binary4M}),
+ assert_channel_alive(Ch1),
+
+ amqp_channel:call(Ch1, #'basic.publish'{routing_key = <<"nope">>}, #amqp_msg{payload = Binary6M}),
+ assert_channel_alive(Ch1),
+
+ Monitor1 = monitor(process, Ch1),
+ amqp_channel:call(Ch1, #'basic.publish'{routing_key = <<"none">>}, #amqp_msg{payload = Binary10M}),
+ assert_channel_fail_max_size(Ch1, Monitor1),
+
+ %% increase beyond the hard limit
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, max_message_size, 1024 * 1024 * 600]),
+ Val = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_channel, get_max_message_size, []),
+
+ ?assertEqual(?MAX_MSG_SIZE, Val).
+
+%% -------------------------------------------------------------------
+%% Implementation
+%% -------------------------------------------------------------------
+
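+%% Builds an N MB payload by concatenating N copies of a 1 MB binary
+%% made of "_" bytes.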
+gen_binary_mb(N) ->
+ B1M = << <<"_">> || _ <- lists:seq(1, 1024 * 1024) >>,
+ << B1M || _ <- lists:seq(1, N) >>.
+
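+%% A successful publish on the channel shows the channel process is
+%% still alive; the routing key is intentionally unroutable.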
+assert_channel_alive(Ch) ->
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = <<"nope">>},
+ #amqp_msg{payload = <<"HI">>}).
+
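+%% When a payload exceeds max_message_size the broker closes the channel
+%% with a 406 (PRECONDITION_FAILED) server-initiated close; the monitor
+%% reference lets us wait for that shutdown.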
+assert_channel_fail_max_size(Ch, Monitor) ->
+ receive
+ {'DOWN', Monitor, process, Ch,
+ {shutdown,
+ {server_initiated_close, 406, _Error}}} ->
+ ok
+ after ?TIMEOUT_CHANNEL_EXCEPTION ->
+ error({channel_exception_expected, max_message_size})
+ end.
diff --git a/deps/rabbit/test/metrics_SUITE.erl b/deps/rabbit/test/metrics_SUITE.erl
new file mode 100644
index 0000000000..e585ccd5a8
--- /dev/null
+++ b/deps/rabbit/test/metrics_SUITE.erl
@@ -0,0 +1,404 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(metrics_SUITE).
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
+
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ connection,
+ channel,
+ channel_connection_close,
+ channel_queue_exchange_consumer_close_connection,
+ channel_queue_delete_queue,
+ connection_metric_count_test,
+ channel_metric_count_test,
+ queue_metric_count_test,
+ queue_metric_count_channel_per_queue_test,
+ connection_metric_idemp_test,
+ channel_metric_idemp_test,
+ queue_metric_idemp_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [
+ {collect_statistics, fine},
+ {collect_statistics_interval, 500}
+ ]}).
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ clean_core_metrics(Config),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+% NB: node_stats tests are in the management_agent repo
+
+connection_metric_count_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_connection_metric_count/1, [Config], 25).
+
+channel_metric_count_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_channel_metric_count/1, [Config], 25).
+
+queue_metric_count_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_queue_metric_count/1, [Config], 5).
+
+queue_metric_count_channel_per_queue_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_queue_metric_count_channel_per_queue/1,
+ [Config], 5).
+
+connection_metric_idemp_test(Config) ->
+ connection_metric_idemp(Config, {1, 1}),
+ connection_metric_idemp(Config, {1, 2}),
+ connection_metric_idemp(Config, {2, 2}).
+
+channel_metric_idemp_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_channel_metric_idemp/1, [Config], 25).
+
+queue_metric_idemp_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_queue_metric_idemp/1, [Config], 25).
+
+prop_connection_metric_idemp(Config) ->
+ ?FORALL(N, {integer(1, 25), integer(1, 25)},
+ connection_metric_idemp(Config, N)).
+
+prop_channel_metric_idemp(Config) ->
+ ?FORALL(N, {integer(1, 25), integer(1, 25)},
+ channel_metric_idemp(Config, N)).
+
+prop_queue_metric_idemp(Config) ->
+ ?FORALL(N, {integer(1, 25), integer(1, 25)},
+ queue_metric_idemp(Config, N)).
+
+prop_connection_metric_count(Config) ->
+ ?FORALL(N, {integer(1, 25), resize(100, list(oneof([add, remove])))},
+ connection_metric_count(Config, N)).
+
+prop_channel_metric_count(Config) ->
+ ?FORALL(N, {integer(1, 25), resize(100, list(oneof([add, remove])))},
+ channel_metric_count(Config, N)).
+
+prop_queue_metric_count(Config) ->
+ ?FORALL(N, {integer(1, 10), resize(10, list(oneof([add, remove])))},
+ queue_metric_count(Config, N)).
+
+prop_queue_metric_count_channel_per_queue(Config) ->
+ ?FORALL(N, {integer(1, 10), resize(10, list(oneof([add, remove])))},
+ queue_metric_count_channel_per_queue(Config, N)).
+
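+%% Idempotence check: open N connections, force stats emission R times
+%% and verify the metrics tables still contain exactly the same N rows
+%% (repeated emission must not duplicate or drop entries).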
+connection_metric_idemp(Config, {N, R}) ->
+ Conns = [rabbit_ct_client_helpers:open_unmanaged_connection(Config)
+ || _ <- lists:seq(1, N)],
+ Table = ?awaitMatch(L when is_list(L) andalso length(L) == N,
+ [ Pid || {Pid, _} <- read_table_rpc(Config,
+ connection_metrics)],
+ 5000),
+ Table2 = [ Pid || {Pid, _} <- read_table_rpc(Config, connection_coarse_metrics)],
+ % refresh stats 'R' times
+ [[Pid ! emit_stats || Pid <- Table] || _ <- lists:seq(1, R)],
+ force_metric_gc(Config),
+ TableAfter = [ Pid || {Pid, _} <- read_table_rpc(Config, connection_metrics)],
+ TableAfter2 = [ Pid || {Pid, _} <- read_table_rpc(Config, connection_coarse_metrics)],
+ [rabbit_ct_client_helpers:close_connection(Conn) || Conn <- Conns],
+ ?assertEqual(Table, TableAfter),
+ ?assertEqual(Table2, TableAfter2),
+ ?assertEqual(N, length(Table)),
+ ?assertEqual(N, length(TableAfter)).
+
+channel_metric_idemp(Config, {N, R}) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ [amqp_connection:open_channel(Conn) || _ <- lists:seq(1, N)],
+ Table = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_metrics)],
+ Table2 = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_process_metrics)],
+ % refresh stats 'R' times
+ [[Pid ! emit_stats || Pid <- Table] || _ <- lists:seq(1, R)],
+ force_metric_gc(Config),
+ TableAfter = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_metrics)],
+ TableAfter2 = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_process_metrics)],
+ rabbit_ct_client_helpers:close_connection(Conn),
+ (Table2 == TableAfter2) and (Table == TableAfter) and
+ (N == length(Table)) and (N == length(TableAfter)).
+
+queue_metric_idemp(Config, {N, R}) ->
+ clean_core_metrics(Config),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queues =
+ [begin
+ Queue = declare_queue(Chan),
+ ensure_exchange_metrics_populated(Chan, Queue),
+ ensure_channel_queue_metrics_populated(Chan, Queue),
+ Queue
+ end || _ <- lists:seq(1, N)],
+
+ Table = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_metrics)],
+ Table2 = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_coarse_metrics)],
+ % refresh stats 'R' times
+ ChanTable = read_table_rpc(Config, channel_created),
+ [[Pid ! emit_stats || {Pid, _, _} <- ChanTable ] || _ <- lists:seq(1, R)],
+ force_metric_gc(Config),
+ TableAfter = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_metrics)],
+ TableAfter2 = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_coarse_metrics)],
+ [ delete_queue(Chan, Q) || Q <- Queues],
+ rabbit_ct_client_helpers:close_connection(Conn),
+ (Table2 == TableAfter2) and (Table == TableAfter) and
+ (N == length(Table)) and (N == length(TableAfter)).
+
+connection_metric_count(Config, Ops) ->
+ add_rem_counter(Config, Ops,
+ {fun rabbit_ct_client_helpers:open_unmanaged_connection/1,
+ fun(Cfg) ->
+ rabbit_ct_client_helpers:close_connection(Cfg)
+ end},
+ [ connection_created,
+ connection_metrics,
+ connection_coarse_metrics ]).
+
+channel_metric_count(Config, Ops) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ Result = add_rem_counter(Config, Ops,
+ {fun (_Config) ->
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Chan
+ end,
+ fun amqp_channel:close/1},
+ [ channel_created,
+ channel_metrics,
+ channel_process_metrics ]),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ Result.
+
+queue_metric_count(Config, Ops) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ AddFun = fun (_) ->
+ Queue = declare_queue(Chan),
+ ensure_exchange_metrics_populated(Chan, Queue),
+ ensure_channel_queue_metrics_populated(Chan, Queue),
+ force_channel_stats(Config),
+ Queue
+ end,
+ Result = add_rem_counter(Config, Ops,
+ {AddFun,
+ fun (Q) -> delete_queue(Chan, Q),
+ force_metric_gc(Config)
+ end}, [channel_queue_metrics,
+ channel_queue_exchange_metrics ]),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ Result.
+
+queue_metric_count_channel_per_queue(Config, Ops) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ AddFun = fun (_) ->
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queue = declare_queue(Chan),
+ ensure_exchange_metrics_populated(Chan, Queue),
+ ensure_channel_queue_metrics_populated(Chan, Queue),
+ force_channel_stats(Config),
+ {Chan, Queue}
+ end,
+ Result = add_rem_counter(Config, Ops,
+ {AddFun,
+ fun ({Chan, Q}) ->
+ delete_queue(Chan, Q),
+ force_metric_gc(Config)
+ end},
+ [ channel_queue_metrics,
+ channel_queue_exchange_metrics ]),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ Result.
+
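+%% Applies the generated add/remove operations and then checks that every
+%% listed metrics table converges to the same final number of rows.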
+add_rem_counter(Config, {Initial, Ops}, {AddFun, RemFun}, Tables) ->
+ Things = [ AddFun(Config) || _ <- lists:seq(1, Initial) ],
+ % either add or remove some things
+ {FinalLen, Things1} =
+ lists:foldl(fun(add, {L, Items}) ->
+ {L+1, [AddFun(Config) | Items]};
+ (remove, {L, [H|Tail]}) ->
+ RemFun(H),
+ {L-1, Tail};
+ (_, S) -> S end,
+ {Initial, Things},
+ Ops),
+ force_metric_gc(Config),
+ TabLens = lists:map(fun(T) ->
+ length(read_table_rpc(Config, T))
+ end, Tables),
+ [RemFun(Thing) || Thing <- Things1],
+ [FinalLen] == lists:usort(TabLens).
+
+
+connection(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ [_] = read_table_rpc(Config, connection_created),
+ [_] = read_table_rpc(Config, connection_metrics),
+ [_] = read_table_rpc(Config, connection_coarse_metrics),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ force_metric_gc(Config),
+ [] = read_table_rpc(Config, connection_created),
+ [] = read_table_rpc(Config, connection_metrics),
+ [] = read_table_rpc(Config, connection_coarse_metrics),
+ ok.
+
+channel(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ [_] = read_table_rpc(Config, channel_created),
+ [_] = read_table_rpc(Config, channel_metrics),
+ [_] = read_table_rpc(Config, channel_process_metrics),
+ ok = amqp_channel:close(Chan),
+ [] = read_table_rpc(Config, channel_created),
+ [] = read_table_rpc(Config, channel_metrics),
+ [] = read_table_rpc(Config, channel_process_metrics),
+ ok = rabbit_ct_client_helpers:close_connection(Conn).
+
+channel_connection_close(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, _} = amqp_connection:open_channel(Conn),
+ [_] = read_table_rpc(Config, channel_created),
+ [_] = read_table_rpc(Config, channel_metrics),
+ [_] = read_table_rpc(Config, channel_process_metrics),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ [] = read_table_rpc(Config, channel_created),
+ [] = read_table_rpc(Config, channel_metrics),
+ [] = read_table_rpc(Config, channel_process_metrics).
+
+channel_queue_delete_queue(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queue = declare_queue(Chan),
+ ensure_exchange_metrics_populated(Chan, Queue),
+ ensure_channel_queue_metrics_populated(Chan, Queue),
+ force_channel_stats(Config),
+ [_] = read_table_rpc(Config, channel_queue_metrics),
+ [_] = read_table_rpc(Config, channel_queue_exchange_metrics),
+
+ delete_queue(Chan, Queue),
+ force_metric_gc(Config),
+ % ensure removal of queue cleans up channel_queue metrics
+ [] = read_table_rpc(Config, channel_queue_exchange_metrics),
+ [] = read_table_rpc(Config, channel_queue_metrics),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ ok.
+
+channel_queue_exchange_consumer_close_connection(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queue = declare_queue(Chan),
+ ensure_exchange_metrics_populated(Chan, Queue),
+ force_channel_stats(Config),
+
+ [_] = read_table_rpc(Config, channel_exchange_metrics),
+ [_] = read_table_rpc(Config, channel_queue_exchange_metrics),
+
+ ensure_channel_queue_metrics_populated(Chan, Queue),
+ force_channel_stats(Config),
+ [_] = read_table_rpc(Config, channel_queue_metrics),
+
+ Sub = #'basic.consume'{queue = Queue},
+ #'basic.consume_ok'{consumer_tag = _} =
+ amqp_channel:call(Chan, Sub),
+
+ [_] = read_table_rpc(Config, consumer_created),
+
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ % ensure cleanup happened
+ force_metric_gc(Config),
+ [] = read_table_rpc(Config, channel_exchange_metrics),
+ [] = read_table_rpc(Config, channel_queue_exchange_metrics),
+ [] = read_table_rpc(Config, channel_queue_metrics),
+ [] = read_table_rpc(Config, consumer_created),
+ ok.
+
+
+
+%% -------------------------------------------------------------------
+%% Utilities
+%% -------------------------------------------------------------------
+
+declare_queue(Chan) ->
+ Declare = #'queue.declare'{durable = false, auto_delete = true},
+ #'queue.declare_ok'{queue = Name} = amqp_channel:call(Chan, Declare),
+ Name.
+
+delete_queue(Chan, Name) ->
+ Delete = #'queue.delete'{queue = Name},
+ #'queue.delete_ok'{} = amqp_channel:call(Chan, Delete).
+
+ensure_exchange_metrics_populated(Chan, RoutingKey) ->
+ % need to publish for exchange metrics to be populated
+ Publish = #'basic.publish'{routing_key = RoutingKey},
+ amqp_channel:call(Chan, Publish, #amqp_msg{payload = <<"hello">>}).
+
+ensure_channel_queue_metrics_populated(Chan, Queue) ->
+    % need to perform a basic.get and wait for the stats timer for the channel queue metrics to be populated
+ Get = #'basic.get'{queue = Queue, no_ack=true},
+ {#'basic.get_ok'{}, #amqp_msg{}} = amqp_channel:call(Chan, Get).
+
+force_channel_stats(Config) ->
+ [ Pid ! emit_stats || {Pid, _} <- read_table_rpc(Config, channel_created) ],
+ timer:sleep(100).
+
+read_table_rpc(Config, Table) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, read_table, [Table]).
+
+clean_core_metrics(Config) ->
+ [ rabbit_ct_broker_helpers:rpc(Config, 0, ets, delete_all_objects, [Table])
+ || {Table, _} <- ?CORE_TABLES].
+
+read_table(Table) ->
+ ets:tab2list(Table).
+
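+%% Triggers a run of the core metrics GC process; the follow-up
+%% gen_server:call acts as a barrier so the GC pass has been processed
+%% before the metrics tables are read.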
+force_metric_gc(Config) ->
+ timer:sleep(300),
+ rabbit_ct_broker_helpers:rpc(Config, 0, erlang, send,
+ [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, 0, gen_server, call,
+ [rabbit_core_metrics_gc, test]).
diff --git a/deps/rabbit/test/mirrored_supervisor_SUITE.erl b/deps/rabbit/test/mirrored_supervisor_SUITE.erl
new file mode 100644
index 0000000000..7ce88cfdaa
--- /dev/null
+++ b/deps/rabbit/test/mirrored_supervisor_SUITE.erl
@@ -0,0 +1,328 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(mirrored_supervisor_SUITE).
+
+-behaviour(mirrored_supervisor).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+-define(MS, mirrored_supervisor).
+-define(SERVER, mirrored_supervisor_SUITE_gs).
+
+all() ->
+ [
+ migrate,
+ migrate_twice,
+ already_there,
+ delete_restart,
+ which_children,
+ large_group,
+ childspecs_at_init,
+ anonymous_supervisors,
+ no_migration_on_shutdown,
+ start_idempotence,
+ unsupported,
+ ignore,
+ startup_failure
+ ].
+
+init_per_suite(Config) ->
+ ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)),
+ ok = application:start(mnesia),
+ lists:foreach(
+ fun ({Tab, TabDef}) ->
+ TabDef1 = proplists:delete(match, TabDef),
+ case mnesia:create_table(Tab, TabDef1) of
+ {atomic, ok} ->
+ ok;
+ {aborted, Reason} ->
+ throw({error,
+ {table_creation_failed, Tab, TabDef1, Reason}})
+ end
+ end, mirrored_supervisor:table_definitions()),
+ Config.
+
+end_per_suite(Config) ->
+ ok = application:stop(mnesia),
+ Config.
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+%% Simplest test
+migrate(_Config) ->
+ passed = with_sups(
+ fun([A, _]) ->
+ {ok, _} = ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [a, b]).
+
+%% Is migration transitive?
+migrate_twice(_Config) ->
+ passed = with_sups(
+ fun([A, B]) ->
+ {ok, _} = ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ {ok, C} = start_sup(c),
+ Pid2 = pid_of(worker),
+ kill_registered(B, Pid2),
+ Pid3 = pid_of(worker),
+ false = (Pid1 =:= Pid3),
+ kill(C)
+ end, [a, b]).
+
+%% Can't start the same child twice
+already_there(_Config) ->
+ passed = with_sups(
+ fun([_, _]) ->
+ S = childspec(worker),
+ {ok, Pid} = ?MS:start_child(a, S),
+ {error, {already_started, Pid}} = ?MS:start_child(b, S)
+ end, [a, b]).
+
+%% Deleting and restarting should work as per a normal supervisor
+delete_restart(_Config) ->
+ passed = with_sups(
+ fun([_, _]) ->
+ S = childspec(worker),
+ {ok, Pid1} = ?MS:start_child(a, S),
+ {error, running} = ?MS:delete_child(a, worker),
+ ok = ?MS:terminate_child(a, worker),
+ ok = ?MS:delete_child(a, worker),
+ {ok, Pid2} = ?MS:start_child(b, S),
+ false = (Pid1 =:= Pid2),
+ ok = ?MS:terminate_child(b, worker),
+ {ok, Pid3} = ?MS:restart_child(b, worker),
+ Pid3 = pid_of(worker),
+ false = (Pid2 =:= Pid3),
+ %% Not the same supervisor as the worker is on
+ ok = ?MS:terminate_child(a, worker),
+ ok = ?MS:delete_child(a, worker),
+ {ok, Pid4} = ?MS:start_child(a, S),
+ false = (Pid3 =:= Pid4)
+ end, [a, b]).
+
+which_children(_Config) ->
+ passed = with_sups(
+ fun([A, B] = Both) ->
+ ?MS:start_child(A, childspec(worker)),
+ assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+ ok = ?MS:terminate_child(a, worker),
+ assert_wc(Both, fun ([C]) -> undefined = wc_pid(C) end),
+ {ok, _} = ?MS:restart_child(a, worker),
+ assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+ ?MS:start_child(B, childspec(worker2)),
+ assert_wc(Both, fun (C) -> 2 = length(C) end)
+ end, [a, b]).
+
+assert_wc(Sups, Fun) ->
+ [Fun(?MS:which_children(Sup)) || Sup <- Sups].
+
+wc_pid(Child) ->
+ {worker, Pid, worker, [?MODULE]} = Child,
+ Pid.
+
+%% Not all the members of the group should actually do the failover
+large_group(_Config) ->
+ passed = with_sups(
+ fun([A, _, _, _]) ->
+ {ok, _} = ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [a, b, c, d]).
+
+%% Do childspecs work when returned from init?
+childspecs_at_init(_Config) ->
+ S = childspec(worker),
+ passed = with_sups(
+ fun([A, _]) ->
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [{a, [S]}, {b, [S]}]).
+
+anonymous_supervisors(_Config) ->
+ passed = with_sups(
+ fun([A, _B]) ->
+ {ok, _} = ?MS:start_child(A, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [anon, anon]).
+
+%% When a mirrored_supervisor terminates, we should not migrate, but
+%% the whole supervisor group should shut down. To test this we set up
+%% a situation where the gen_server will only fail if it's running
+%% under the supervisor called 'evil'. It should not migrate to
+%% 'good' and survive, rather the whole group should go away.
+no_migration_on_shutdown(_Config) ->
+ passed = with_sups(
+ fun([Evil, _]) ->
+ {ok, _} = ?MS:start_child(Evil, childspec(worker)),
+ try
+ call(worker, ping, 1000, 100),
+ exit(worker_should_not_have_migrated)
+ catch exit:{timeout_waiting_for_server, _, _} ->
+ ok
+ end
+ end, [evil, good]).
+
+start_idempotence(_Config) ->
+ passed = with_sups(
+ fun([_]) ->
+ CS = childspec(worker),
+ {ok, Pid} = ?MS:start_child(a, CS),
+ {error, {already_started, Pid}} = ?MS:start_child(a, CS),
+ ?MS:terminate_child(a, worker),
+ {error, already_present} = ?MS:start_child(a, CS)
+ end, [a]).
+
+unsupported(_Config) ->
+ try
+ ?MS:start_link({global, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+ {one_for_one, []}),
+ exit(no_global)
+ catch error:badarg ->
+ ok
+ end,
+ try
+ {ok, _} = ?MS:start_link({local, foo}, get_group(group),
+ fun tx_fun/1, ?MODULE, {simple_one_for_one, []}),
+ exit(no_sofo)
+ catch error:badarg ->
+ ok
+ end.
+
+%% Just test we don't blow up
+ignore(_Config) ->
+ ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+ {fake_strategy_for_ignore, []}).
+
+startup_failure(_Config) ->
+ [test_startup_failure(F) || F <- [want_error, want_exit]].
+
+test_startup_failure(Fail) ->
+ process_flag(trap_exit, true),
+ ?MS:start_link(get_group(group), fun tx_fun/1, ?MODULE,
+ {one_for_one, [childspec(Fail)]}),
+ receive
+ {'EXIT', _, shutdown} ->
+ ok
+ after 1000 ->
+ exit({did_not_exit, Fail})
+ end,
+ process_flag(trap_exit, false).
+
+%% ---------------------------------------------------------------------------
+
+with_sups(Fun, Sups) ->
+ inc_group(),
+ Pids = [begin {ok, Pid} = start_sup(Sup), Pid end || Sup <- Sups],
+ Fun(Pids),
+ [kill(Pid) || Pid <- Pids, is_process_alive(Pid)],
+ timer:sleep(500),
+ passed.
+
+start_sup(Spec) ->
+ start_sup(Spec, group).
+
+start_sup({Name, ChildSpecs}, Group) ->
+ {ok, Pid} = start_sup0(Name, get_group(Group), ChildSpecs),
+ %% We are not a supervisor, when we kill the supervisor we do not
+ %% want to die!
+ unlink(Pid),
+ {ok, Pid};
+
+start_sup(Name, Group) ->
+ start_sup({Name, []}, Group).
+
+start_sup0(anon, Group, ChildSpecs) ->
+ ?MS:start_link(Group, fun tx_fun/1, ?MODULE,
+ {one_for_one, ChildSpecs});
+
+start_sup0(Name, Group, ChildSpecs) ->
+ ?MS:start_link({local, Name}, Group, fun tx_fun/1, ?MODULE,
+ {one_for_one, ChildSpecs}).
+
+childspec(Id) ->
+ {Id,{?SERVER, start_link, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}.
+
+pid_of(Id) ->
+ {received, Pid, ping} = call(Id, ping),
+ Pid.
+
+tx_fun(Fun) ->
+ case mnesia:sync_transaction(Fun) of
+ {atomic, Result} -> Result;
+ {aborted, Reason} -> throw({error, Reason})
+ end.
+
+inc_group() ->
+ Count = case get(counter) of
+ undefined -> 0;
+ C -> C
+ end + 1,
+ put(counter, Count).
+
+get_group(Group) ->
+ {Group, get(counter)}.
+
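+%% Retries gen_server:call/3 every Decr milliseconds until MaxDelay is
+%% exhausted, keeping the last stacktrace for the timeout exit reason.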
+call(Id, Msg) -> call(Id, Msg, 10*1000, 100).
+
+call(Id, Msg, MaxDelay, Decr) ->
+ call(Id, Msg, MaxDelay, Decr, undefined).
+
+call(Id, Msg, 0, _Decr, Stacktrace) ->
+ exit({timeout_waiting_for_server, {Id, Msg}, Stacktrace});
+
+call(Id, Msg, MaxDelay, Decr, _) ->
+ try
+ gen_server:call(Id, Msg, infinity)
+ catch exit:_:Stacktrace -> timer:sleep(Decr),
+ call(Id, Msg, MaxDelay - Decr, Decr, Stacktrace)
+ end.
+
+kill(Pid) -> kill(Pid, []).
+kill(Pid, Wait) when is_pid(Wait) -> kill(Pid, [Wait]);
+kill(Pid, Waits) ->
+ erlang:monitor(process, Pid),
+ [erlang:monitor(process, P) || P <- Waits],
+ exit(Pid, bang),
+ kill_wait(Pid),
+ [kill_wait(P) || P <- Waits].
+
+kill_registered(Pid, Child) ->
+ {registered_name, Name} = erlang:process_info(Child, registered_name),
+ kill(Pid, Child),
+ false = (Child =:= whereis(Name)),
+ ok.
+
+kill_wait(Pid) ->
+ receive
+ {'DOWN', _Ref, process, Pid, _Reason} ->
+ ok
+ end.
+
+%% ---------------------------------------------------------------------------
+
+init({fake_strategy_for_ignore, _ChildSpecs}) ->
+ ignore;
+
+init({Strategy, ChildSpecs}) ->
+ {ok, {{Strategy, 0, 1}, ChildSpecs}}.
diff --git a/deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl b/deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl
new file mode 100644
index 0000000000..62245231d7
--- /dev/null
+++ b/deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl
@@ -0,0 +1,57 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(mirrored_supervisor_SUITE_gs).
+
+%% Dumb gen_server we can supervise
+
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+ handle_cast/2]).
+
+-behaviour(gen_server).
+
+-define(MS, mirrored_supervisor).
+
+start_link(want_error) ->
+ {error, foo};
+
+start_link(want_exit) ->
+ exit(foo);
+
+start_link(Id) ->
+ gen_server:start_link({local, Id}, ?MODULE, [], []).
+
+%% ---------------------------------------------------------------------------
+
+init([]) ->
+ {ok, state}.
+
+handle_call(Msg, _From, State) ->
+ die_if_my_supervisor_is_evil(),
+ {reply, {received, self(), Msg}, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+die_if_my_supervisor_is_evil() ->
+ try lists:keysearch(self(), 2, ?MS:which_children(evil)) of
+ false -> ok;
+ _ -> exit(doooom)
+ catch
+ exit:{noproc, _} -> ok
+ end.
diff --git a/deps/rabbit/test/msg_store_SUITE.erl b/deps/rabbit/test/msg_store_SUITE.erl
new file mode 100644
index 0000000000..e349aa4443
--- /dev/null
+++ b/deps/rabbit/test/msg_store_SUITE.erl
@@ -0,0 +1,53 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(msg_store_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
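+%% Applies rabbit:Fun(Args) inside a catch so that a validation failure
+%% comes back as a term instead of crashing the test process.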
+-define(T(Fun, Args), (catch apply(rabbit, Fun, Args))).
+
+all() ->
+ [
+ parameter_validation
+ ].
+
+parameter_validation(_Config) ->
+ %% make sure it works with default values
+ ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [?CREDIT_DISC_BOUND, ?IO_BATCH_SIZE]),
+
+ %% IO_BATCH_SIZE must be greater than CREDIT_DISC_BOUND initial credit
+ ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{4000, 800}, 5000]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{4000, 800}, 1500]),
+
+ %% All values must be integers
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 500}, "1500"]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{"2000", 500}, abc]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, "500"}, 2048]),
+
+ %% CREDIT_DISC_BOUND must be a tuple
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [[2000, 500], 1500]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [2000, 1500]),
+
+ %% config values can't be smaller than default values
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{1999, 500}, 2048]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 499}, 2048]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 500}, 2047]).
diff --git a/deps/rabbit/test/peer_discovery_classic_config_SUITE.erl b/deps/rabbit/test/peer_discovery_classic_config_SUITE.erl
new file mode 100644
index 0000000000..ddb753adf8
--- /dev/null
+++ b/deps/rabbit/test/peer_discovery_classic_config_SUITE.erl
@@ -0,0 +1,179 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(peer_discovery_classic_config_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-import(rabbit_ct_broker_helpers, [
+ cluster_members_online/2
+]).
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel}
+ ].
+
+groups() ->
+ [
+ {non_parallel, [], [
+ successful_discovery,
+ successful_discovery_with_a_subset_of_nodes_coming_online,
+ no_nodes_configured
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 5}}
+ ].
+
+
+%%
+%% Setup/teardown.
+%%
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(successful_discovery = Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+
+ N = 3,
+ NodeNames = [
+ list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+ || I <- lists:seq(1, N)
+ ],
+ Config2 = rabbit_ct_helpers:set_config(Config1, [
+ {rmq_nodename_suffix, Testcase},
+ %% note: this must not include the host part
+ {rmq_nodes_count, NodeNames},
+ {rmq_nodes_clustered, false}
+ ]),
+ NodeNamesWithHostname = [rabbit_nodes:make({Name, "localhost"}) || Name <- NodeNames],
+ Config3 = rabbit_ct_helpers:merge_app_env(Config2,
+ {rabbit, [
+ {cluster_nodes, {NodeNamesWithHostname, disc}},
+ {cluster_formation, [
+ {randomized_startup_delay_range, {1, 10}}
+ ]}
+ ]}),
+ rabbit_ct_helpers:run_steps(Config3,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_testcase(successful_discovery_with_a_subset_of_nodes_coming_online = Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+
+ N = 2,
+ NodeNames = [
+ list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+ || I <- lists:seq(1, N)
+ ],
+ Config2 = rabbit_ct_helpers:set_config(Config1, [
+ {rmq_nodename_suffix, Testcase},
+ %% note: this must not include the host part
+ {rmq_nodes_count, NodeNames},
+ {rmq_nodes_clustered, false}
+ ]),
+ NodeNamesWithHostname = [rabbit_nodes:make({Name, "localhost"}) || Name <- [nonexistent | NodeNames]],
+    %% reduce the retry interval since we know one node on the list does
+    %% not exist (as opposed to merely being unreachable)
+    Config3 = rabbit_ct_helpers:merge_app_env(Config2,
+                 {rabbit, [
+                  {cluster_nodes, {NodeNamesWithHostname, disc}},
+                  {cluster_formation, [
+                      {discovery_retry_limit, 10},
+                      {discovery_retry_interval, 200},
+                      {randomized_startup_delay_range, {1, 10}}
+                  ]}
+                 ]}),
+ rabbit_ct_helpers:run_steps(Config3,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_testcase(no_nodes_configured = Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config2 = rabbit_ct_helpers:set_config(Config1, [
+ {rmq_nodename_suffix, Testcase},
+ {rmq_nodes_count, 2},
+ {rmq_nodes_clustered, false}
+ ]),
+ Config3 = rabbit_ct_helpers:merge_app_env(Config2,
+ {rabbit, [
+ {cluster_nodes, {[], disc}},
+ {cluster_formation, [
+ {randomized_startup_delay_range, {1, 10}}
+ ]}
+ ]}),
+ rabbit_ct_helpers:run_steps(Config3,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+
+%%
+%% Test cases
+%%
+successful_discovery(Config) ->
+ Condition = fun() ->
+ 3 =:= length(cluster_members_online(Config, 0)) andalso
+ 3 =:= length(cluster_members_online(Config, 1))
+ end,
+ await_cluster(Config, Condition, [1, 2]).
+
+successful_discovery_with_a_subset_of_nodes_coming_online(Config) ->
+ Condition = fun() ->
+ 2 =:= length(cluster_members_online(Config, 0)) andalso
+ 2 =:= length(cluster_members_online(Config, 1))
+ end,
+ await_cluster(Config, Condition, [1]).
+
+no_nodes_configured(Config) ->
+ Condition = fun() -> length(cluster_members_online(Config, 0)) < 2 end,
+ await_cluster(Config, Condition, [1]).
+
+reset_and_restart_node(Config, I) when is_integer(I) andalso I >= 0 ->
+ Name = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ rabbit_control_helper:command(stop_app, Name),
+ rabbit_ct_broker_helpers:reset_node(Config, Name),
+ rabbit_control_helper:command(start_app, Name).
+
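+%% Waits up to 30 seconds for the cluster to form; if that fails
+%% (most likely a startup deadlock), the given nodes are reset and
+%% restarted and the wait is retried once.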
+await_cluster(Config, Condition, Nodes) ->
+ try
+ rabbit_ct_helpers:await_condition(Condition, 30000)
+ catch
+ exit:{test_case_failed, _} ->
+ ct:pal(?LOW_IMPORTANCE, "Possible dead-lock; resetting/restarting these nodes: ~p", [Nodes]),
+ [reset_and_restart_node(Config, N) || N <- Nodes],
+ rabbit_ct_helpers:await_condition(Condition, 30000)
+ end.
diff --git a/deps/rabbit/test/peer_discovery_dns_SUITE.erl b/deps/rabbit/test/peer_discovery_dns_SUITE.erl
new file mode 100644
index 0000000000..5184bc11eb
--- /dev/null
+++ b/deps/rabbit/test/peer_discovery_dns_SUITE.erl
@@ -0,0 +1,104 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(peer_discovery_dns_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel}
+ ].
+
+groups() ->
+ [
+ {non_parallel, [], [
+ hostname_discovery_with_long_node_names,
+ hostname_discovery_with_short_node_names,
+ node_discovery_with_long_node_names,
+ node_discovery_with_short_node_names,
+ test_aaaa_record_hostname_discovery
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 1}}
+ ].
+
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% A stable, publicly resolvable hostname whose A and AAAA records
+%% also reverse resolve.
+-define(DISCOVERY_ENDPOINT_RECORD_A, "dns.google").
+-define(DISCOVERY_ENDPOINT_RECORD_AAAA, "dns.google").
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+
+init_per_testcase(test_aaaa_record_hostname_discovery, Config) ->
+ case inet_res:lookup(?DISCOVERY_ENDPOINT_RECORD_AAAA, in, aaaa) of
+ [] ->
+ {skip, "pre-configured AAAA record does not resolve, skipping"};
+ [_ | _] ->
+ Config
+ end;
+
+init_per_testcase(_Testcase, Config) ->
+ case inet_res:lookup(?DISCOVERY_ENDPOINT_RECORD_A, in, a) of
+ [] ->
+          {skip, "pre-configured A record does not resolve, skipping"};
+ [_ | _] ->
+ Config
+ end.
+
+
+end_per_testcase(_Testcase, Config) ->
+ case inet_res:lookup(?DISCOVERY_ENDPOINT_RECORD_A, in, a) of
+ [] ->
+          {skip, "pre-configured A record does not resolve, skipping"};
+ [_ | _] ->
+ Config
+ end.
+
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+test_aaaa_record_hostname_discovery(_) ->
+ Result = rabbit_peer_discovery_dns:discover_hostnames(?DISCOVERY_ENDPOINT_RECORD_AAAA, true),
+ ?assert(string:str(lists:flatten(Result), "dns.google") > 0).
+
+hostname_discovery_with_long_node_names(_) ->
+ Result = rabbit_peer_discovery_dns:discover_hostnames(?DISCOVERY_ENDPOINT_RECORD_A, true),
+ ?assert(lists:member("dns.google", Result)).
+
+hostname_discovery_with_short_node_names(_) ->
+ Result = rabbit_peer_discovery_dns:discover_hostnames(?DISCOVERY_ENDPOINT_RECORD_A, false),
+ ?assert(lists:member("dns", Result)).
+
+node_discovery_with_long_node_names(_) ->
+ Result = rabbit_peer_discovery_dns:discover_nodes(?DISCOVERY_ENDPOINT_RECORD_A, true),
+ ?assert(lists:member('ct_rabbit@dns.google', Result)).
+
+node_discovery_with_short_node_names(_) ->
+ Result = rabbit_peer_discovery_dns:discover_nodes(?DISCOVERY_ENDPOINT_RECORD_A, false),
+ ?assert(lists:member(ct_rabbit@dns, Result)).
diff --git a/deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl b/deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl
new file mode 100644
index 0000000000..43c860c8bd
--- /dev/null
+++ b/deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl
@@ -0,0 +1,1651 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_user_connection_channel_limit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_1_network},
+ {group, cluster_size_2_network},
+ {group, cluster_size_2_direct}
+ ].
+
+groups() ->
+ ClusterSize1Tests = [
+ most_basic_single_node_connection_and_channel_count,
+ single_node_single_user_connection_and_channel_count,
+ single_node_multiple_users_connection_and_channel_count,
+ single_node_list_in_user,
+ single_node_single_user_limit,
+ single_node_single_user_zero_limit,
+ single_node_single_user_clear_limits,
+ single_node_multiple_users_clear_limits,
+ single_node_multiple_users_limit,
+        single_node_multiple_users_zero_limit
+    ],
+ ClusterSize2Tests = [
+ most_basic_cluster_connection_and_channel_count,
+ cluster_single_user_connection_and_channel_count,
+ cluster_multiple_users_connection_and_channel_count,
+ cluster_node_restart_connection_and_channel_count,
+ cluster_node_list_on_node,
+ cluster_single_user_limit,
+ cluster_single_user_limit2,
+ cluster_single_user_zero_limit,
+ cluster_single_user_clear_limits,
+ cluster_multiple_users_clear_limits,
+ cluster_multiple_users_zero_limit
+ ],
+ [
+ {cluster_size_1_network, [], ClusterSize1Tests},
+ {cluster_size_2_network, [], ClusterSize2Tests},
+ {cluster_size_2_direct, [], ClusterSize2Tests}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_1_network, Config1, 1);
+init_per_group(cluster_size_2_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_2_network, Config1, 2);
+init_per_group(cluster_size_2_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_2_direct, Config1, 2);
+
+init_per_group(cluster_rename, Config) ->
+ init_per_multinode_group(cluster_rename, Config, 2).
+
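+%% Starts a cluster of NodeCount nodes and enables the user_limits feature
+%% flag; if the flag cannot be enabled, the whole group is skipped.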
+init_per_multinode_group(Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ case Group of
+ cluster_rename ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config1;
+ _ ->
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1, rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, user_limits),
+ case EnableFF of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end
+ end.
+
+end_per_group(cluster_rename, Config) ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config;
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
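+%% Every test case starts and finishes with empty connection and channel
+%% tracking tables.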
+init_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+    clear_all_connection_tracking_tables(Config1),
+    clear_all_channel_tracking_tables(Config1),
+    Config1.
+
+end_per_testcase(Testcase, Config) ->
+ clear_all_connection_tracking_tables(Config),
+ clear_all_channel_tracking_tables(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+clear_all_connection_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_connection_tracking,
+ clear_tracking_tables,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+clear_all_channel_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_channel_tracking,
+ clear_tracking_tables,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+most_basic_single_node_connection_and_channel_count(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn] = open_connections(Config, [0]),
+ [Chan] = open_channels(Conn, 1),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 1
+ end),
+ close_channels([Chan]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+ close_connections([Conn]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end).
+
+single_node_single_user_connection_and_channel_count(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [0]),
+ [Chan1] = open_channels(Conn1, 1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 1
+ end),
+ close_channels([Chan1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [0]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [0]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 2 andalso
+ count_channels_of_user(Config, Username) =:= 10
+ end),
+
+ [Conn4] = open_connections(Config, [0]),
+ _Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 3 andalso
+ count_channels_of_user(Config, Username) =:= 15
+ end),
+
+ close_connections([Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 2 andalso
+ count_channels_of_user(Config, Username) =:= 10
+ end),
+
+ [Conn5] = open_connections(Config, [0]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 3 andalso
+ count_channels_of_user(Config, Username) =:= 15
+ end),
+
+ close_channels(Chans2 ++ Chans3 ++ Chans5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn5]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end).
+
+single_node_multiple_users_connection_and_channel_count(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username1) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [{0, Username1}]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+ close_channels(Chans1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0
+ end),
+ ?assertEqual(0, count_channels_of_user(Config, Username1)),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username1) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 1 andalso
+ count_channels_of_user(Config, Username2) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [{0, Username1}]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 1 andalso
+ count_channels_of_user(Config, Username2) =:= 5
+ end),
+
+ [Conn4] = open_connections(Config, [{0, Username1}]),
+ _Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 2 andalso
+ count_channels_of_user(Config, Username1) =:= 10
+ end),
+
+ close_connections([Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+
+ [Conn5] = open_connections(Config, [{0, Username2}]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 2 andalso
+ count_channels_of_user(Config, Username2) =:= 10
+ end),
+
+ [Conn6] = open_connections(Config, [{0, Username2}]),
+ Chans6 = [_|_] = open_channels(Conn6, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 3 andalso
+ count_channels_of_user(Config, Username2) =:= 15
+ end),
+
+ close_channels(Chans2 ++ Chans3 ++ Chans5 ++ Chans6),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username1),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2).
+
+single_node_list_in_user(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_in(Config, Username1)) =:= 0 andalso
+ length(connections_in(Config, Username2)) =:= 0
+ end),
+
+ ?assertEqual(0, length(channels_in(Config, Username1))),
+ ?assertEqual(0, length(channels_in(Config, Username2))),
+
+ [Conn1] = open_connections(Config, [{0, Username1}]),
+ [Chan1] = open_channels(Conn1, 1),
+ [#tracked_connection{username = Username1}] = connections_in(Config, Username1),
+ [#tracked_channel{username = Username1}] = channels_in(Config, Username1),
+ close_channels([Chan1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(channels_in(Config, Username1)) =:= 0
+ end),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_in(Config, Username1)) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ [Chan2] = open_channels(Conn2, 1),
+ [#tracked_connection{username = Username2}] = connections_in(Config, Username2),
+ [#tracked_channel{username = Username2}] = channels_in(Config, Username2),
+
+ [Conn3] = open_connections(Config, [{0, Username1}]),
+ [Chan3] = open_channels(Conn3, 1),
+ [#tracked_connection{username = Username1}] = connections_in(Config, Username1),
+ [#tracked_channel{username = Username1}] = channels_in(Config, Username1),
+
+ [Conn4] = open_connections(Config, [{0, Username1}]),
+ [_Chan4] = open_channels(Conn4, 1),
+ close_connections([Conn4]),
+ [#tracked_connection{username = Username1}] = connections_in(Config, Username1),
+ [#tracked_channel{username = Username1}] = channels_in(Config, Username1),
+
+ [Conn5, Conn6] = open_connections(Config, [{0, Username2}, {0, Username2}]),
+ [Chan5] = open_channels(Conn5, 1),
+ [Chan6] = open_channels(Conn6, 1),
+ [<<"guest1">>, <<"guest2">>] =
+ lists:usort(lists:map(fun (#tracked_connection{username = V}) -> V end,
+ all_connections(Config))),
+ [<<"guest1">>, <<"guest2">>] =
+ lists:usort(lists:map(fun (#tracked_channel{username = V}) -> V end,
+ all_channels(Config))),
+
+ close_channels([Chan2, Chan3, Chan5, Chan6]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_channels(Config)) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_connections(Config)) =:= 0
+ end),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username1),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2).
+
+most_basic_cluster_connection_and_channel_count(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [0]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+ ?assertEqual(1, count_connections_of_user(Config, Username)),
+ ?assertEqual(5, count_channels_of_user(Config, Username)),
+
+ [Conn2] = open_connections(Config, [1]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(2, count_connections_of_user(Config, Username)),
+ ?assertEqual(10, count_channels_of_user(Config, Username)),
+
+ [Conn3] = open_connections(Config, [1]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ ?assertEqual(3, count_connections_of_user(Config, Username)),
+ ?assertEqual(15, count_channels_of_user(Config, Username)),
+
+ close_channels(Chans1 ++ Chans2 ++ Chans3),
+ ?awaitMatch(0, count_channels_of_user(Config, Username), 60000),
+
+ close_connections([Conn1, Conn2, Conn3]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 60000).
+
+cluster_single_user_connection_and_channel_count(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [0]),
+ _Chans1 = [_|_] = open_channels(Conn1, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [1]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [0]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 2 andalso
+ count_channels_of_user(Config, Username) =:= 10
+ end),
+
+ [Conn4] = open_connections(Config, [1]),
+ Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 3 andalso
+ count_channels_of_user(Config, Username) =:= 15
+ end),
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end).
+
+cluster_multiple_users_connection_and_channel_count(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [{0, Username1}]),
+ _Chans1 = [_|_] = open_channels(Conn1, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username1) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [{1, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 1 andalso
+ count_channels_of_user(Config, Username2) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [{1, Username1}]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 1 andalso
+ count_channels_of_user(Config, Username2) =:= 5
+ end),
+
+ [Conn4] = open_connections(Config, [{0, Username1}]),
+ _Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 2 andalso
+ count_channels_of_user(Config, Username1) =:= 10
+ end),
+
+ close_connections([Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+
+ [Conn5] = open_connections(Config, [{1, Username2}]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 2 andalso
+ count_channels_of_user(Config, Username2) =:= 10
+ end),
+
+ [Conn6] = open_connections(Config, [{0, Username2}]),
+ Chans6 = [_|_] = open_channels(Conn6, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 3 andalso
+ count_channels_of_user(Config, Username2) =:= 15
+ end),
+
+ close_channels(Chans2 ++ Chans3 ++ Chans5 ++ Chans6),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username1),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2).
+
+cluster_node_restart_connection_and_channel_count(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [0]),
+ _Chans1 = [_|_] = open_channels(Conn1, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [1]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [0]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 2 andalso
+ count_channels_of_user(Config, Username) =:= 10
+ end),
+
+ [Conn4] = open_connections(Config, [1]),
+ _Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 3 andalso
+ count_channels_of_user(Config, Username) =:= 15
+ end),
+
+ [Conn5] = open_connections(Config, [1]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 4 andalso
+ count_channels_of_user(Config, Username) =:= 20
+ end),
+
+ rabbit_ct_broker_helpers:restart_broker(Config, 1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+
+ close_channels(Chans2 ++ Chans3 ++ Chans5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end).
+
+cluster_node_list_on_node(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_connections(Config)) =:= 0 andalso
+ length(all_channels(Config)) =:= 0 andalso
+ length(connections_on_node(Config, 0)) =:= 0 andalso
+ length(channels_on_node(Config, 0)) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [0]),
+ _Chans1 = [_|_] = open_channels(Conn1, 5),
+ [#tracked_connection{node = A}] = connections_on_node(Config, 0),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length([Ch || Ch <- channels_on_node(Config, 0), Ch#tracked_channel.node =:= A]) =:= 5
+ end),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_on_node(Config, 0)) =:= 0 andalso
+ length(channels_on_node(Config, 0)) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [1]),
+ _Chans2 = [_|_] = open_channels(Conn2, 5),
+ [#tracked_connection{node = B}] = connections_on_node(Config, 1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length([Ch || Ch <- channels_on_node(Config, 1), Ch#tracked_channel.node =:= B]) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [0]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_on_node(Config, 0)) =:= 1 andalso
+ length(channels_on_node(Config, 0)) =:= 5
+ end),
+
+ [Conn4] = open_connections(Config, [1]),
+ _Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_on_node(Config, 1)) =:= 2 andalso
+ length(channels_on_node(Config, 1)) =:= 10
+ end),
+
+ close_connections([Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_on_node(Config, 1)) =:= 1 andalso
+ length(channels_on_node(Config, 1)) =:= 5
+ end),
+
+ [Conn5] = open_connections(Config, [0]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_on_node(Config, 0)) =:= 2 andalso
+ length(channels_on_node(Config, 0)) =:= 10
+ end),
+
+ rabbit_ct_broker_helpers:stop_broker(Config, 1),
+ await_running_node_refresh(Config, 0),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_connections(Config)) =:= 2 andalso
+ length(all_channels(Config)) =:= 10
+ end),
+
+ close_channels(Chans3 ++ Chans5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_channels(Config)) =:= 0
+ end),
+
+ close_connections([Conn3, Conn5]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_connections(Config)) =:= 0
+ end),
+
+ rabbit_ct_broker_helpers:start_broker(Config, 1).
+
+single_node_single_user_limit(Config) ->
+ single_node_single_user_limit_with(Config, 5, 25),
+ single_node_single_user_limit_with(Config, -1, -1).
+
+single_node_single_user_limit_with(Config, ConnLimit, ChLimit) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 3, 15),
+
+ ?assertEqual(0, count_connections_of_user(Config, Username)),
+ ?assertEqual(0, count_channels_of_user(Config, Username)),
+
+ [Conn1, Conn2, Conn3] = Conns1 = open_connections(Config, [0, 0, 0]),
+ [_Chans1, Chans2, Chans3] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ %% we've crossed the limit
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_channel_is_rejected(Conn1),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn2) andalso
+ is_process_alive(Conn3)
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, ConnLimit, ChLimit),
+ [Conn4, Conn5] = Conns2 = open_connections(Config, [0, 0]),
+ [Chans4, Chans5] = [open_channels(Conn, 5) || Conn <- Conns2],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4 ++ Chans5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4, Conn5]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 60000),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+single_node_single_user_zero_limit(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 0, 0),
+
+ ?assertEqual(0, count_connections_of_user(Config, Username)),
+ ?assertEqual(0, count_channels_of_user(Config, Username)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config),
+ expect_that_client_connection_is_rejected(Config),
+ expect_that_client_connection_is_rejected(Config),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username, 1, 0),
+ [ConnA] = open_connections(Config, [0]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1
+ end),
+ expect_that_client_channel_is_rejected(ConnA),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(ConnA) =:= false andalso
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1),
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [0, 0]),
+ [Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 2 andalso
+ count_channels_of_user(Config, Username) =:= 10
+ end),
+
+ close_channels(Chans1 ++ Chans2),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 60000).
+
+single_node_single_user_clear_limits(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 3, 15),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn1, Conn2, Conn3] = Conns1 = open_connections(Config, [0, 0, 0]),
+ [_Chans1, Chans2, Chans3] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ %% we've crossed the limit
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_channel_is_rejected(Conn1),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn2) andalso
+ is_process_alive(Conn3)
+ end),
+
+ %% reach limit again
+ [Conn4] = open_connections(Config, [{0, Username}]),
+ Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 3 andalso
+ count_channels_of_user(Config, Username) =:= 15
+ end),
+
+ clear_all_user_limits(Config, Username),
+
+ [Conn5, Conn6, Conn7] = Conns2 = open_connections(Config, [0, 0, 0]),
+ [Chans5, Chans6, Chans7] = [open_channels(Conn, 5) || Conn <- Conns2],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4 ++ Chans5 ++ Chans6 ++ Chans7),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5, Conn6, Conn7]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 5000),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+single_node_multiple_users_clear_limits(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ set_user_connection_and_channel_limit(Config, Username1, 0, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 0, 0),
+
+ ?assertEqual(0, count_connections_of_user(Config, Username1)),
+ ?assertEqual(0, count_connections_of_user(Config, Username2)),
+ ?assertEqual(0, count_channels_of_user(Config, Username1)),
+ ?assertEqual(0, count_channels_of_user(Config, Username2)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+ expect_that_client_connection_is_rejected(Config, 0, Username2),
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username1, 1, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 1, 0),
+ [ConnA, ConnB] = open_connections(Config, [{0, Username1}, {0, Username2}]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1
+ end),
+ expect_that_client_channel_is_rejected(ConnA),
+ expect_that_client_channel_is_rejected(ConnB),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(ConnA) =:= false andalso
+ is_process_alive(ConnB) =:= false
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ clear_all_user_limits(Config, Username1),
+ set_user_channel_limit_only(Config, Username2, -1),
+ set_user_connection_limit_only(Config, Username2, -1),
+
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [{0, Username1}, {0, Username1}]),
+ [Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ close_channels(Chans1 ++ Chans2),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1).
+
+single_node_multiple_users_limit(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ set_user_connection_and_channel_limit(Config, Username1, 2, 10),
+ set_user_connection_and_channel_limit(Config, Username2, 2, 10),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ [Conn1, Conn2, Conn3, Conn4] = Conns1 = open_connections(Config, [
+ {0, Username1},
+ {0, Username1},
+ {0, Username2},
+ {0, Username2}]),
+
+ [_Chans1, Chans2, Chans3, Chans4] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ %% we've crossed the limit
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+ expect_that_client_connection_is_rejected(Config, 0, Username2),
+ expect_that_client_channel_is_rejected(Conn1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn3) =:= true
+ end),
+
+ [Conn5] = open_connections(Config, [0]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+
+ set_user_connection_and_channel_limit(Config, Username1, 5, 25),
+ set_user_connection_and_channel_limit(Config, Username2, -10, -50),
+
+ [Conn6, Conn7, Conn8, Conn9, Conn10] = Conns2 = open_connections(Config, [
+ {0, Username1},
+ {0, Username1},
+ {0, Username1},
+ {0, Username2},
+ {0, Username2}]),
+
+ [Chans6, Chans7, Chans8, Chans9, Chans10] = [open_channels(Conn, 5) || Conn <- Conns2],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4 ++ Chans5 ++ Chans6 ++
+ Chans7 ++ Chans8 ++ Chans9 ++ Chans10),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5, Conn6,
+ Conn7, Conn8, Conn9, Conn10]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username1),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2).
+
+
+single_node_multiple_users_zero_limit(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ set_user_connection_and_channel_limit(Config, Username1, 0, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 0, 0),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+ expect_that_client_connection_is_rejected(Config, 0, Username2),
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username1, 1, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 1, 0),
+ [ConnA, ConnB] = open_connections(Config, [{0, Username1}, {0, Username2}]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1
+ end),
+ expect_that_client_channel_is_rejected(ConnA),
+ expect_that_client_channel_is_rejected(ConnB),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(ConnA) =:= false andalso
+ is_process_alive(ConnB) =:= false
+ end),
+
+ ?assertEqual(false, is_process_alive(ConnA)),
+ ?assertEqual(false, is_process_alive(ConnB)),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [{0, Username1}, {0, Username1}]),
+ [Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ close_channels(Chans1 ++ Chans2),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1).
+
+
+cluster_single_user_limit(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_limit_only(Config, Username, 2),
+ set_user_channel_limit_only(Config, Username, 10),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+            count_connections_of_user(Config, Username) =:= 0 andalso
+            count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ %% here connections and channels are opened to different nodes
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [{0, Username}, {1, Username}]),
+ [_Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ %% we've crossed the limit
+ expect_that_client_connection_is_rejected(Config, 0, Username),
+ expect_that_client_connection_is_rejected(Config, 1, Username),
+ expect_that_client_channel_is_rejected(Conn1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn2) =:= true
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, 5, 25),
+
+ [Conn3, Conn4] = Conns2 = open_connections(Config, [{0, Username}, {0, Username}]),
+ [Chans3, Chans4] = [open_channels(Conn, 5) || Conn <- Conns2],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4),
+ ?awaitMatch(0, count_channels_of_user(Config, Username), 60000),
+
+ close_connections([Conn2, Conn3, Conn4]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 60000),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+cluster_single_user_limit2(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 2, 10),
+
+ ?assertEqual(0, count_connections_of_user(Config, Username)),
+ ?assertEqual(0, count_channels_of_user(Config, Username)),
+
+ %% here a limit is reached on one node first
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [{0, Username}, {0, Username}]),
+ [_Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ %% we've crossed the limit
+ expect_that_client_connection_is_rejected(Config, 0, Username),
+ expect_that_client_connection_is_rejected(Config, 1, Username),
+ expect_that_client_channel_is_rejected(Conn1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn2) =:= true
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, 5, 25),
+
+ [Conn3, Conn4, Conn5, Conn6, {error, not_allowed}] = open_connections(Config, [
+ {1, Username},
+ {1, Username},
+ {1, Username},
+ {1, Username},
+ {1, Username}]),
+
+ [Chans3, Chans4, Chans5, Chans6, [{error, not_allowed}]] =
+ [open_channels(Conn, 1) || Conn <- [Conn3, Conn4, Conn5, Conn6, Conn1]],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4 ++ Chans5 ++ Chans6),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5, Conn6]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 5000),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+
+cluster_single_user_zero_limit(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 0, 0),
+
+ ?assertEqual(0, count_connections_of_user(Config, Username)),
+ ?assertEqual(0, count_channels_of_user(Config, Username)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 1),
+ expect_that_client_connection_is_rejected(Config, 0),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username, 1, 0),
+ [ConnA] = open_connections(Config, [0]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1
+ end),
+ expect_that_client_channel_is_rejected(ConnA),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+ ?assertEqual(false, is_process_alive(ConnA)),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1),
+ [Conn1, Conn2, Conn3, Conn4] = Conns1 = open_connections(Config, [0, 1, 0, 1]),
+ [Chans1, Chans2, Chans3, Chans4] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ close_channels(Chans1 ++ Chans2 ++ Chans3 ++ Chans4),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+cluster_single_user_clear_limits(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 2, 10),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ %% here a limit is reached on one node first
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [{0, Username}, {0, Username}]),
+ [_Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ %% we've crossed the limit
+ expect_that_client_connection_is_rejected(Config, 0, Username),
+ expect_that_client_connection_is_rejected(Config, 1, Username),
+ expect_that_client_channel_is_rejected(Conn1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn2) =:= true
+ end),
+ clear_all_user_limits(Config, Username),
+
+ [Conn3, Conn4, Conn5, Conn6, Conn7] = open_connections(Config, [
+ {1, Username},
+ {1, Username},
+ {1, Username},
+ {1, Username},
+ {1, Username}]),
+
+ [Chans3, Chans4, Chans5, Chans6, Chans7] =
+ [open_channels(Conn, 1) || Conn <- [Conn3, Conn4, Conn5, Conn6, Conn7]],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4 ++ Chans5 ++ Chans6 ++ Chans7),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5, Conn6, Conn7]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+cluster_multiple_users_clear_limits(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ set_user_connection_and_channel_limit(Config, Username1, 0, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 0, 0),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+ expect_that_client_connection_is_rejected(Config, 0, Username2),
+ expect_that_client_connection_is_rejected(Config, 1, Username1),
+ expect_that_client_connection_is_rejected(Config, 1, Username2),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username1, 1, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 1, 0),
+ [ConnA, ConnB] = open_connections(Config, [{0, Username1}, {1, Username2}]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_connections_of_user(Config, Username2) =:= 1
+ end),
+ expect_that_client_channel_is_rejected(ConnA),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(ConnA) =:= false andalso
+ is_process_alive(ConnB) =:= true
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 1
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+ close_connections([ConnB]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+ ?assertEqual(false, is_process_alive(ConnB)),
+
+ clear_all_user_limits(Config, Username1),
+ clear_all_user_limits(Config, Username2),
+
+ [Conn1, Conn2, Conn3, Conn4] = Conns1 = open_connections(Config, [
+ {0, Username1},
+ {0, Username2},
+ {1, Username1},
+ {1, Username2}]),
+
+ [Chans1, Chans2, Chans3, Chans4] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ close_channels(Chans1 ++ Chans2 ++ Chans3 ++ Chans4),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1).
+
+cluster_multiple_users_zero_limit(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ set_user_connection_and_channel_limit(Config, Username1, 0, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 0, 0),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+ expect_that_client_connection_is_rejected(Config, 0, Username2),
+ expect_that_client_connection_is_rejected(Config, 1, Username1),
+ expect_that_client_connection_is_rejected(Config, 1, Username2),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username1, 1, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 1, 0),
+ [ConnA, ConnB] = open_connections(Config, [{0, Username1}, {1, Username2}]),
+
+ expect_that_client_channel_is_rejected(ConnA),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 1
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+ ?assertEqual(false, is_process_alive(ConnA)),
+ ?assertEqual(true, is_process_alive(ConnB)),
+ close_connections([ConnB]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+ ?assertEqual(false, is_process_alive(ConnB)),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1),
+
+ [Conn1, Conn2, Conn3, Conn4] = Conns1 = open_connections(Config, [
+ {0, Username1},
+ {0, Username2},
+ {1, Username1},
+ {1, Username2}]),
+
+ [Chans1, Chans2, Chans3, Chans4] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ close_channels(Chans1 ++ Chans2 ++ Chans3 ++ Chans4),
+ ?awaitMatch(0, count_channels_of_user(Config, Username1), 60000),
+ ?awaitMatch(0, count_channels_of_user(Config, Username2), 60000),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username1), 60000),
+ ?awaitMatch(0, count_connections_of_user(Config, Username2), 60000),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1).
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+open_connections(Config, NodesAndUsers) ->
+ % Randomly select connection type
+ OpenConnectionFun = case ?config(connection_type, Config) of
+ network -> open_unmanaged_connection;
+ direct -> open_unmanaged_connection_direct
+ end,
+ Conns = lists:map(fun
+ ({Node, User}) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node,
+ User, User);
+ (Node) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
+ end, NodesAndUsers),
+ timer:sleep(100),
+ Conns.
+
+close_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ rabbit_ct_client_helpers:close_connection(Conn)
+ end, Conns).
+
+open_channels(Conn, N) ->
+ [open_channel(Conn) || _ <- lists:seq(1, N)].
+
+open_channel(Conn) when is_pid(Conn) ->
+ try amqp_connection:open_channel(Conn) of
+ {ok, Ch} -> Ch
+ catch
+ _:_Error -> {error, not_allowed}
+ end.
+
+close_channels(Channels = [_|_]) ->
+ [rabbit_ct_client_helpers:close_channel(Ch) || Ch <- Channels].
+
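+%% The tracking helpers below RPC into rabbit_connection_tracking and
+%% rabbit_channel_tracking on the target node to count and list tracked items.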
+count_connections_of_user(Config, Username) ->
+ count_connections_in(Config, Username, 0).
+count_connections_in(Config, Username, NodeIndex) ->
+ count_user_tracked_items(Config, NodeIndex, rabbit_connection_tracking, Username).
+
+count_channels_of_user(Config, Username) ->
+ count_channels_in(Config, Username, 0).
+count_channels_in(Config, Username, NodeIndex) ->
+ count_user_tracked_items(Config, NodeIndex, rabbit_channel_tracking, Username).
+
+count_user_tracked_items(Config, NodeIndex, TrackingMod, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ count_tracked_items_in, [{user, Username}]).
+
+connections_in(Config, Username) ->
+ connections_in(Config, 0, Username).
+connections_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_connection_tracking, Username).
+
+channels_in(Config, Username) ->
+ channels_in(Config, 0, Username).
+channels_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_channel_tracking, Username).
+
+tracked_list_of_user(Config, NodeIndex, TrackingMod, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list_of_user, [Username]).
+
+connections_on_node(Config) ->
+ connections_on_node(Config, 0).
+connections_on_node(Config, NodeIndex) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, NodeIndex, nodename),
+ tracked_items_on_node(Config, NodeIndex, rabbit_connection_tracking, Node).
+
+channels_on_node(Config) ->
+ channels_on_node(Config, 0).
+channels_on_node(Config, NodeIndex) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, NodeIndex, nodename),
+ tracked_items_on_node(Config, NodeIndex, rabbit_channel_tracking, Node).
+
+tracked_items_on_node(Config, NodeIndex, TrackingMod, NodeForListing) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list_on_node, [NodeForListing]).
+
+all_connections(Config) ->
+ all_connections(Config, 0).
+all_connections(Config, NodeIndex) ->
+ all_tracked_items(Config, NodeIndex, rabbit_connection_tracking).
+
+all_channels(Config) ->
+ all_channels(Config, 0).
+all_channels(Config, NodeIndex) ->
+ all_tracked_items(Config, NodeIndex, rabbit_channel_tracking).
+
+all_tracked_items(Config, NodeIndex, TrackingMod) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list, []).
+
+set_up_user(Config, Username) ->
+ VHost = proplists:get_value(rmq_vhost, Config),
+ rabbit_ct_broker_helpers:add_user(Config, Username),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username, VHost),
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+set_user_connection_and_channel_limit(Config, Username, ConnLimit, ChLimit) ->
+ set_user_connection_and_channel_limit(Config, 0, Username, ConnLimit, ChLimit).
+
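+%% Roughly equivalent to running, against the given node:
+%%   rabbitmqctl set_user_limits <username> '{"max-connections": C, "max-channels": Ch}'
+%% The suite passes -1 to mean "no limit".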
+set_user_connection_and_channel_limit(Config, NodeIndex, Username, ConnLimit, ChLimit) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_user_limits, Node, [rabbit_data_coercion:to_list(Username)] ++
+ ["{\"max-connections\": " ++ integer_to_list(ConnLimit) ++ "," ++
+ " \"max-channels\": " ++ integer_to_list(ChLimit) ++ "}"]).
+
+set_user_connection_limit_only(Config, Username, ConnLimit) ->
+ set_user_connection_limit_only(Config, 0, Username, ConnLimit).
+
+set_user_connection_limit_only(Config, NodeIndex, Username, ConnLimit) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_user_limits, Node, [rabbit_data_coercion:to_list(Username)] ++
+ ["{\"max-connections\": " ++ integer_to_list(ConnLimit) ++ "}"]).
+
+set_user_channel_limit_only(Config, Username, ChLimit) ->
+ set_user_channel_limit_only(Config, 0, Username, ChLimit).
+
+set_user_channel_limit_only(Config, NodeIndex, Username, ChLimit) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_user_limits, Node, [rabbit_data_coercion:to_list(Username)] ++
+ ["{\"max-channels\": " ++ integer_to_list(ChLimit) ++ "}"]).
+
+clear_all_user_limits(Config, Username) ->
+ clear_all_user_limits(Config, 0, Username).
+clear_all_user_limits(Config, NodeIndex, Username) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ clear_user_limits, Node, [rabbit_data_coercion:to_list(Username), "all"]).
+
+await_running_node_refresh(_Config, _NodeIndex) ->
+ timer:sleep(250).
+
+expect_that_client_connection_is_rejected(Config) ->
+ expect_that_client_connection_is_rejected(Config, 0).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex, User) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex, User, User).
+
+expect_that_client_channel_is_rejected(Conn) ->
+ {error, not_allowed} = open_channel(Conn).
diff --git a/deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl b/deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl
new file mode 100644
index 0000000000..8af68f0112
--- /dev/null
+++ b/deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl
@@ -0,0 +1,182 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_user_connection_channel_limit_partitions_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-import(rabbit_ct_client_helpers, [open_unmanaged_connection/2
+ ]).
+
+all() ->
+ [
+ {group, net_ticktime_1}
+ ].
+
+groups() ->
+ [
+ {net_ticktime_1, [], [
+ cluster_full_partition_with_autoheal
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% see partitions_SUITE
+-define(DELAY, 12000).
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(
+ Config, [fun rabbit_ct_broker_helpers:configure_dist_proxy/1]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(net_ticktime_1 = Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{net_ticktime, 1}]),
+ init_per_multinode_group(Group, Config1, 3).
+
+init_per_multinode_group(Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1, rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, user_limits),
+ case EnableFF of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end.
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+cluster_full_partition_with_autoheal(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_broker_helpers:set_partition_handling_mode_globally(Config, autoheal),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ %% 6 connections, 2 per node
+ Conn1 = open_unmanaged_connection(Config, A),
+ Conn2 = open_unmanaged_connection(Config, A),
+ Conn3 = open_unmanaged_connection(Config, B),
+ Conn4 = open_unmanaged_connection(Config, B),
+ Conn5 = open_unmanaged_connection(Config, C),
+ Conn6 = open_unmanaged_connection(Config, C),
+
+ _Chans1 = [_|_] = open_channels(Conn1, 5),
+ _Chans3 = [_|_] = open_channels(Conn3, 5),
+ _Chans5 = [_|_] = open_channels(Conn5, 5),
+ wait_for_count_connections_in(Config, Username, 6, 60000),
+ ?assertEqual(15, count_channels_in(Config, Username)),
+
+ %% B drops off the network, unreachable from both A and C
+ rabbit_ct_broker_helpers:block_traffic_between(A, B),
+ rabbit_ct_broker_helpers:block_traffic_between(B, C),
+ timer:sleep(?DELAY),
+
+ %% A and C are still connected, so 4 connections are tracked
+ %% All connections to B are dropped
+ wait_for_count_connections_in(Config, Username, 4, 60000),
+ ?assertEqual(10, count_channels_in(Config, Username)),
+
+ rabbit_ct_broker_helpers:allow_traffic_between(A, B),
+ rabbit_ct_broker_helpers:allow_traffic_between(B, C),
+ timer:sleep(?DELAY),
+
+ %% during autoheal B's connections were dropped
+ wait_for_count_connections_in(Config, Username, 4, 60000),
+ ?assertEqual(10, count_channels_in(Config, Username)),
+
+ lists:foreach(fun (Conn) ->
+ (catch rabbit_ct_client_helpers:close_connection(Conn))
+ end, [Conn1, Conn2, Conn3, Conn4,
+ Conn5, Conn6]),
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+
+ passed.
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
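+%% Polls the tracked connections of Username every 3 seconds until Expected
+%% connections are listed or Time (milliseconds) is exhausted, at which point
+%% the condition is asserted one final time.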
+wait_for_count_connections_in(Config, Username, Expected, Time) when Time =< 0 ->
+ ?assertMatch(Connections when length(Connections) == Expected,
+ connections_in(Config, Username));
+wait_for_count_connections_in(Config, Username, Expected, Time) ->
+ case connections_in(Config, Username) of
+ Connections when length(Connections) == Expected ->
+ ok;
+ _ ->
+ Sleep = 3000,
+ timer:sleep(Sleep),
+ wait_for_count_connections_in(Config, Username, Expected, Time - Sleep)
+ end.
+
+open_channels(Conn, N) ->
+ [begin
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Ch
+ end || _ <- lists:seq(1, N)].
+
+count_connections_in(Config, Username) ->
+ length(connections_in(Config, Username)).
+
+connections_in(Config, Username) ->
+ connections_in(Config, 0, Username).
+connections_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_connection_tracking, Username).
+
+count_channels_in(Config, Username) ->
+ Channels = channels_in(Config, Username),
+ length([Ch || Ch = #tracked_channel{username = Username0} <- Channels,
+ Username =:= Username0]).
+
+channels_in(Config, Username) ->
+ channels_in(Config, 0, Username).
+channels_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_channel_tracking, Username).
+
+tracked_list_of_user(Config, NodeIndex, TrackingMod, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list_of_user, [Username]).
diff --git a/deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl b/deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl
new file mode 100644
index 0000000000..8b4bd91d09
--- /dev/null
+++ b/deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl
@@ -0,0 +1,850 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_user_connection_channel_tracking_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_1_network},
+ {group, cluster_size_2_network},
+ {group, cluster_size_1_direct},
+ {group, cluster_size_2_direct}
+ ].
+
+groups() ->
+ ClusterSize1Tests = [
+ single_node_user_connection_channel_tracking,
+ single_node_user_deletion,
+ single_node_vhost_down_mimic,
+ single_node_vhost_deletion
+ ],
+ ClusterSize2Tests = [
+ cluster_user_deletion,
+ cluster_vhost_down_mimic,
+ cluster_vhost_deletion,
+ cluster_node_removed
+ ],
+ [
+ {cluster_size_1_network, [], ClusterSize1Tests},
+ {cluster_size_2_network, [], ClusterSize2Tests},
+ {cluster_size_1_direct, [], ClusterSize1Tests},
+ {cluster_size_2_direct, [], ClusterSize2Tests}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_1_network, Config1, 1);
+init_per_group(cluster_size_2_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_2_network, Config1, 2);
+init_per_group(cluster_size_1_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_1_direct, Config1, 1);
+init_per_group(cluster_size_2_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_2_direct, Config1, 2).
+
+init_per_multinode_group(Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1, rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, user_limits),
+ case EnableFF of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end.
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ clear_all_connection_tracking_tables(Config),
+ clear_all_channel_tracking_tables(Config),
+ Config.
+
+end_per_testcase(Testcase, Config) ->
+ clear_all_connection_tracking_tables(Config),
+ clear_all_channel_tracking_tables(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+clear_all_connection_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_connection_tracking,
+ clear_tracking_tables,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+clear_all_channel_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_channel_tracking,
+ clear_tracking_tables,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+single_node_user_connection_channel_tracking(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ [Chan1] = open_channels(Conn1, 1),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+ [#tracked_channel{username = Username}] = channels_in(Config, Username),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ ?assertEqual(true, is_process_alive(Chan1)),
+ close_channels([Chan1]),
+ ?awaitMatch(0, count_channels_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username), 20000),
+ ?awaitMatch(false, is_process_alive(Chan1), 20000),
+ close_connections([Conn1]),
+ ?awaitMatch(0, length(connections_in(Config, Username)), 20000),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username), 20000),
+ ?awaitMatch(false, is_process_alive(Conn1), 20000),
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ timer:sleep(100),
+ [#tracked_connection{username = Username2}] = connections_in(Config, Username2),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ [Conn3] = open_connections(Config, [0]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn3)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans3],
+
+ [Conn4] = open_connections(Config, [0]),
+ Chans4 = [_|_] = open_channels(Conn4, 5),
+ ?assertEqual(2, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(10, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn4)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans4],
+ kill_connections([Conn4]),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+ ?awaitMatch(5, count_channels_in(Config, Username), 20000),
+ ?awaitMatch(1, tracked_user_connection_count(Config, Username), 20000),
+ ?awaitMatch(5, tracked_user_channel_count(Config, Username), 20000),
+ ?assertEqual(false, is_process_alive(Conn4)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans4],
+
+ [Conn5] = open_connections(Config, [0]),
+ Chans5 = [_|_] = open_channels(Conn5, 7),
+ [Username, Username] =
+ lists:map(fun (#tracked_connection{username = U}) -> U end,
+ connections_in(Config, Username)),
+ ?assertEqual(12, count_channels_in(Config, Username)),
+ ?assertEqual(12, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(2, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn5)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans5],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans5),
+ ?awaitMatch(0, length(all_channels(Config)), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username2), 20000),
+
+ close_connections([Conn2, Conn3, Conn5]),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username2), 20000),
+ ?awaitMatch(0, length(all_connections(Config)), 20000).
+
+single_node_user_deletion(Config) ->
+ set_tracking_execution_timeout(Config, 100),
+
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(100, get_tracking_execution_timeout(Config)),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(true, exists_in_tracked_connection_per_user_table(Config, Username2)),
+ ?assertEqual(true, exists_in_tracked_channel_per_user_table(Config, Username2)),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ timer:sleep(100),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ %% ensure user entries are cleared after 'tracking_execution_timeout'
+ ?awaitMatch(false, exists_in_tracked_connection_per_user_table(Config, Username2), 20000),
+ ?awaitMatch(false, exists_in_tracked_channel_per_user_table(Config, Username2), 20000),
+
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ close_channels(Chans1),
+ ?awaitMatch(0, count_channels_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username), 20000),
+
+ close_connections([Conn1]),
+ ?awaitMatch(0, count_connections_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username), 20000).
+
+single_node_vhost_deletion(Config) ->
+ set_tracking_execution_timeout(Config, 100),
+
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(100, get_tracking_execution_timeout(Config)),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(true, exists_in_tracked_connection_per_vhost_table(Config, Vhost)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, Vhost),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(false, is_process_alive(Conn1)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans1],
+
+ %% ensure vhost entry is cleared after 'tracking_execution_timeout'
+ ?assertEqual(false, exists_in_tracked_connection_per_vhost_table(Config, Vhost)),
+
+ rabbit_ct_broker_helpers:add_vhost(Config, Vhost).
+
+single_node_vhost_down_mimic(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ %% mimic vhost down event, while connections exist
+ mimic_vhost_down(Config, 0, Vhost),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(false, is_process_alive(Conn1)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans1].
+
+cluster_user_deletion(Config) ->
+ set_tracking_execution_timeout(Config, 0, 100),
+ set_tracking_execution_timeout(Config, 1, 100),
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(100, get_tracking_execution_timeout(Config, 0)),
+ ?assertEqual(100, get_tracking_execution_timeout(Config, 1)),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{1, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(true, exists_in_tracked_connection_per_user_table(Config, 1, Username2)),
+ ?assertEqual(true, exists_in_tracked_channel_per_user_table(Config, 1, Username2)),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ %% ensure user entry is cleared after 'tracking_execution_timeout'
+ ?assertEqual(false, exists_in_tracked_connection_per_user_table(Config, 1, Username2)),
+ ?assertEqual(false, exists_in_tracked_channel_per_user_table(Config, 1, Username2)),
+
+ close_channels(Chans1),
+ ?awaitMatch(0, count_channels_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username), 20000),
+
+ close_connections([Conn1]),
+ ?awaitMatch(0, count_connections_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username), 20000).
+
+cluster_vhost_deletion(Config) ->
+ set_tracking_execution_timeout(Config, 0, 100),
+ set_tracking_execution_timeout(Config, 1, 100),
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(100, get_tracking_execution_timeout(Config, 0)),
+ ?assertEqual(100, get_tracking_execution_timeout(Config, 1)),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [{0, Username}]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{1, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(true, exists_in_tracked_connection_per_vhost_table(Config, 0, Vhost)),
+ ?assertEqual(true, exists_in_tracked_connection_per_vhost_table(Config, 1, Vhost)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, Vhost),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(false, is_process_alive(Conn1)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans1],
+
+ %% ensure vhost entry is cleared after 'tracking_execution_timeout'
+ ?assertEqual(false, exists_in_tracked_connection_per_vhost_table(Config, 0, Vhost)),
+ ?assertEqual(false, exists_in_tracked_connection_per_vhost_table(Config, 1, Vhost)),
+
+ rabbit_ct_broker_helpers:add_vhost(Config, Vhost),
+ rabbit_ct_broker_helpers:add_user(Config, Username),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username, Vhost).
+
+cluster_vhost_down_mimic(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [{0, Username}]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{1, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ mimic_vhost_down(Config, 1, Vhost),
+ timer:sleep(100),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ %% gen_event only notifies handlers on the local node, so connections on the other node remain active
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ mimic_vhost_down(Config, 0, Vhost),
+ timer:sleep(100),
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(false, is_process_alive(Conn1)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans1].
+
+cluster_node_removed(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [{0, Username}]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{1, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ rabbit_ct_broker_helpers:stop_broker(Config, 1),
+ timer:sleep(200),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ rabbit_ct_broker_helpers:forget_cluster_node(Config, 0, 1),
+ timer:sleep(200),
+ NodeName = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+
+ DroppedConnTrackingTables =
+ rabbit_connection_tracking:get_all_tracked_connection_table_names_for_node(NodeName),
+ [?assertEqual(
+ {'EXIT', {aborted, {no_exists, Tab, all}}},
+ catch mnesia:table_info(Tab, all)) || Tab <- DroppedConnTrackingTables],
+
+ DroppedChTrackingTables =
+ rabbit_channel_tracking:get_all_tracked_channel_table_names_for_node(NodeName),
+ [?assertEqual(
+ {'EXIT', {aborted, {no_exists, Tab, all}}},
+ catch mnesia:table_info(Tab, all)) || Tab <- DroppedChTrackingTables],
+
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ close_channels(Chans1),
+ ?awaitMatch(0, count_channels_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username), 20000),
+
+ close_connections([Conn1]),
+ ?awaitMatch(0, count_connections_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username), 20000).
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+open_connections(Config, NodesAndUsers) ->
+ % Select the connection helper based on the group's connection_type config (network or direct)
+ OpenConnectionFun = case ?config(connection_type, Config) of
+ network -> open_unmanaged_connection;
+ direct -> open_unmanaged_connection_direct
+ end,
+ Conns = lists:map(fun
+ ({Node, User}) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node,
+ User, User);
+ (Node) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
+ end, NodesAndUsers),
+ timer:sleep(500),
+ Conns.
+
+close_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ rabbit_ct_client_helpers:close_connection(Conn)
+ end, Conns),
+ timer:sleep(500).
+
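+%% Unlike close_connections/1, this sends an exit signal to each connection
+%% process to simulate an abrupt client failure instead of a clean AMQP close.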
+kill_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ (catch exit(Conn, please_terminate))
+ end, Conns),
+ timer:sleep(500).
+
+open_channels(Conn, N) ->
+ [begin
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Ch
+ end || _ <- lists:seq(1, N)].
+
+close_channels(Channels = [_|_]) ->
+ [rabbit_ct_client_helpers:close_channel(Ch) || Ch <- Channels].
+
+count_connections_in(Config, Username) ->
+ length(connections_in(Config, Username)).
+
+connections_in(Config, Username) ->
+ connections_in(Config, 0, Username).
+connections_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_connection_tracking, Username).
+
+count_channels_in(Config, Username) ->
+ Channels = channels_in(Config, Username),
+ length([Ch || Ch = #tracked_channel{username = Username0} <- Channels,
+ Username =:= Username0]).
+
+channels_in(Config, Username) ->
+ channels_in(Config, 0, Username).
+channels_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_channel_tracking, Username).
+
+tracked_list_of_user(Config, NodeIndex, TrackingMod, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list_of_user, [Username]).
+
+tracked_user_connection_count(Config, Username) ->
+ tracked_user_connection_count(Config, 0, Username).
+tracked_user_connection_count(Config, NodeIndex, Username) ->
+ count_user_tracked_items(Config, NodeIndex, rabbit_connection_tracking, Username).
+
+tracked_user_channel_count(Config, Username) ->
+ tracked_user_channel_count(Config, 0, Username).
+tracked_user_channel_count(Config, NodeIndex, Username) ->
+ count_user_tracked_items(Config, NodeIndex, rabbit_channel_tracking, Username).
+
+count_user_tracked_items(Config, NodeIndex, TrackingMod, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ count_tracked_items_in, [{user, Username}]).
+
+exists_in_tracked_connection_per_vhost_table(Config, VHost) ->
+ exists_in_tracked_connection_per_vhost_table(Config, 0, VHost).
+exists_in_tracked_connection_per_vhost_table(Config, NodeIndex, VHost) ->
+ exists_in_tracking_table(Config, NodeIndex,
+ fun rabbit_connection_tracking:tracked_connection_per_vhost_table_name_for/1,
+ VHost).
+
+exists_in_tracked_connection_per_user_table(Config, Username) ->
+ exists_in_tracked_connection_per_user_table(Config, 0, Username).
+exists_in_tracked_connection_per_user_table(Config, NodeIndex, Username) ->
+ exists_in_tracking_table(Config, NodeIndex,
+ fun rabbit_connection_tracking:tracked_connection_per_user_table_name_for/1,
+ Username).
+
+exists_in_tracked_channel_per_user_table(Config, Username) ->
+ exists_in_tracked_channel_per_user_table(Config, 0, Username).
+exists_in_tracked_channel_per_user_table(Config, NodeIndex, Username) ->
+ exists_in_tracking_table(Config, NodeIndex,
+ fun rabbit_channel_tracking:tracked_channel_per_user_table_name_for/1,
+ Username).
+
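+%% Resolves the node-specific tracking table name via TableNameFun and checks,
+%% using mnesia:dirty_all_keys/1 over RPC, whether Key still has an entry.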
+exists_in_tracking_table(Config, NodeIndex, TableNameFun, Key) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ Tab = TableNameFun(Node),
+ AllKeys = rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ mnesia,
+ dirty_all_keys, [Tab]),
+ lists:member(Key, AllKeys).
+
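+%% Invokes rabbit_vhost:vhost_down/1 over RPC to mimic the "vhost down" event
+%% used by the *_vhost_down_mimic test cases, without stopping the node.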
+mimic_vhost_down(Config, NodeIndex, VHost) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_vhost, vhost_down, [VHost]).
+
+all_connections(Config) ->
+ all_connections(Config, 0).
+all_connections(Config, NodeIndex) ->
+ all_tracked_items(Config, NodeIndex, rabbit_connection_tracking).
+
+all_channels(Config) ->
+ all_channels(Config, 0).
+all_channels(Config, NodeIndex) ->
+ all_tracked_items(Config, NodeIndex, rabbit_channel_tracking).
+
+all_tracked_items(Config, NodeIndex, TrackingMod) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list, []).
+
+set_up_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost),
+ set_vhost_connection_limit(Config, VHost, -1).
+
+set_vhost_connection_limit(Config, VHost, Count) ->
+ set_vhost_connection_limit(Config, 0, VHost, Count).
+
+set_vhost_connection_limit(Config, NodeIndex, VHost, Count) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_vhost_limits, Node,
+ ["{\"max-connections\": " ++ integer_to_list(Count) ++ "}"],
+ [{"-p", binary_to_list(VHost)}]).
+
+set_tracking_execution_timeout(Config, Timeout) ->
+ set_tracking_execution_timeout(Config, 0, Timeout).
+set_tracking_execution_timeout(Config, NodeIndex, Timeout) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ application, set_env,
+ [rabbit, tracking_execution_timeout, Timeout]).
+
+get_tracking_execution_timeout(Config) ->
+ get_tracking_execution_timeout(Config, 0).
+get_tracking_execution_timeout(Config, NodeIndex) ->
+ {ok, Timeout} = rabbit_ct_broker_helpers:rpc(
+ Config, NodeIndex,
+ application, get_env,
+ [rabbit, tracking_execution_timeout]),
+ Timeout.
+
+await_running_node_refresh(_Config, _NodeIndex) ->
+ timer:sleep(250).
+
+expect_that_client_connection_is_rejected(Config) ->
+ expect_that_client_connection_is_rejected(Config, 0).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex, VHost) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex, VHost).
diff --git a/deps/rabbit/test/per_user_connection_tracking_SUITE.erl b/deps/rabbit/test/per_user_connection_tracking_SUITE.erl
new file mode 100644
index 0000000000..36b0962eac
--- /dev/null
+++ b/deps/rabbit/test/per_user_connection_tracking_SUITE.erl
@@ -0,0 +1,269 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_user_connection_tracking_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_1_network},
+ {group, cluster_size_2_network},
+ {group, cluster_size_1_direct},
+ {group, cluster_size_2_direct}
+ ].
+
+groups() ->
+ ClusterSize1Tests = [
+ single_node_list_of_user,
+ single_node_user_deletion_forces_connection_closure
+ ],
+ ClusterSize2Tests = [
+ cluster_user_deletion_forces_connection_closure
+ ],
+ [
+ {cluster_size_1_network, [], ClusterSize1Tests},
+ {cluster_size_2_network, [], ClusterSize2Tests},
+ {cluster_size_1_direct, [], ClusterSize1Tests},
+ {cluster_size_2_direct, [], ClusterSize2Tests}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_1_network, Config1, 1);
+init_per_group(cluster_size_2_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_2_network, Config1, 2);
+init_per_group(cluster_size_1_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_1_direct, Config1, 1);
+init_per_group(cluster_size_2_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_2_direct, Config1, 2).
+
+init_per_multinode_group(_Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ clear_all_connection_tracking_tables(Config),
+ Config.
+
+end_per_testcase(Testcase, Config) ->
+ clear_all_connection_tracking_tables(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+clear_all_connection_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_connection_tracking,
+ clear_tracked_connection_tables_for_this_node,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+single_node_list_of_user(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, length(connections_in(Config, Username))),
+ ?assertEqual(0, length(connections_in(Config, Username2))),
+
+ [Conn1] = open_connections(Config, [0]),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+ close_connections([Conn1]),
+ ?assertEqual(0, length(connections_in(Config, Username))),
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ [#tracked_connection{username = Username2}] = connections_in(Config, Username2),
+
+ [Conn3] = open_connections(Config, [0]),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+
+ [Conn4] = open_connections(Config, [0]),
+ kill_connections([Conn4]),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+
+ [Conn5] = open_connections(Config, [0]),
+ [Username, Username] =
+ lists:map(fun (#tracked_connection{username = U}) -> U end,
+ connections_in(Config, Username)),
+
+ close_connections([Conn2, Conn3, Conn5]),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ ?assertEqual(0, length(all_connections(Config))).
+
+single_node_user_deletion_forces_connection_closure(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+
+ [_Conn2] = open_connections(Config, [{0, Username2}]),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, Username)).
+
+cluster_user_deletion_forces_connection_closure(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [{0, Username}]),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+
+ [_Conn2] = open_connections(Config, [{1, Username2}]),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, Username)).
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+open_connections(Config, NodesAndUsers) ->
+ % Select the connection helper based on the group's connection_type config (network or direct)
+ OpenConnectionFun = case ?config(connection_type, Config) of
+ network -> open_unmanaged_connection;
+ direct -> open_unmanaged_connection_direct
+ end,
+ Conns = lists:map(fun
+ ({Node, User}) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node,
+ User, User);
+ (Node) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
+ end, NodesAndUsers),
+ timer:sleep(500),
+ Conns.
+
+close_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ rabbit_ct_client_helpers:close_connection(Conn)
+ end, Conns),
+ timer:sleep(500).
+
+kill_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ (catch exit(Conn, please_terminate))
+ end, Conns),
+ timer:sleep(500).
+
+count_connections_in(Config, Username) ->
+ length(connections_in(Config, Username)).
+
+connections_in(Config, Username) ->
+ connections_in(Config, 0, Username).
+connections_in(Config, NodeIndex, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list_of_user, [Username]).
+
+all_connections(Config) ->
+ all_connections(Config, 0).
+all_connections(Config, NodeIndex) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list, []).
+
+set_up_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost),
+ set_vhost_connection_limit(Config, VHost, -1).
+
+set_vhost_connection_limit(Config, VHost, Count) ->
+ set_vhost_connection_limit(Config, 0, VHost, Count).
+
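+%% Roughly equivalent to running, against the given node:
+%%   rabbitmqctl set_vhost_limits -p <vhost> '{"max-connections": N}'
+%% where -1 means "no limit".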
+set_vhost_connection_limit(Config, NodeIndex, VHost, Count) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_vhost_limits, Node,
+ ["{\"max-connections\": " ++ integer_to_list(Count) ++ "}"],
+ [{"-p", binary_to_list(VHost)}]).
+
+await_running_node_refresh(_Config, _NodeIndex) ->
+ timer:sleep(250).
+
+expect_that_client_connection_is_rejected(Config) ->
+ expect_that_client_connection_is_rejected(Config, 0).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex, VHost) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex, VHost).
diff --git a/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl b/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl
new file mode 100644
index 0000000000..a140b3e829
--- /dev/null
+++ b/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl
@@ -0,0 +1,751 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_vhost_connection_limit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_1_network},
+ {group, cluster_size_2_network},
+ {group, cluster_size_1_direct},
+ {group, cluster_size_2_direct}
+ ].
+
+groups() ->
+ ClusterSize1Tests = [
+ most_basic_single_node_connection_count,
+ single_node_single_vhost_connection_count,
+ single_node_multiple_vhosts_connection_count,
+ single_node_list_in_vhost,
+ single_node_single_vhost_limit,
+ single_node_single_vhost_zero_limit,
+ single_node_multiple_vhosts_limit,
+ single_node_multiple_vhosts_zero_limit
+ ],
+ ClusterSize2Tests = [
+ most_basic_cluster_connection_count,
+ cluster_single_vhost_connection_count,
+ cluster_multiple_vhosts_connection_count,
+ cluster_node_restart_connection_count,
+ cluster_node_list_on_node,
+ cluster_single_vhost_limit,
+ cluster_single_vhost_limit2,
+ cluster_single_vhost_zero_limit,
+ cluster_multiple_vhosts_zero_limit
+ ],
+ [
+ {cluster_size_1_network, [], ClusterSize1Tests},
+ {cluster_size_2_network, [], ClusterSize2Tests},
+ {cluster_size_1_direct, [], ClusterSize1Tests},
+ {cluster_size_2_direct, [], ClusterSize2Tests},
+ {cluster_rename, [], [
+ vhost_limit_after_node_renamed
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% see partitions_SUITE
+-define(DELAY, 9000).
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_1_network, Config1, 1);
+init_per_group(cluster_size_2_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_2_network, Config1, 2);
+init_per_group(cluster_size_1_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_1_direct, Config1, 1);
+init_per_group(cluster_size_2_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_2_direct, Config1, 2);
+
+init_per_group(cluster_rename, Config) ->
+ init_per_multinode_group(cluster_rename, Config, 2).
+
+init_per_multinode_group(Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ case Group of
+ cluster_rename ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config1;
+ _ ->
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps())
+ end.
+
+end_per_group(cluster_rename, Config) ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config;
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(vhost_limit_after_node_renamed = Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ clear_all_connection_tracking_tables(Config),
+ Config.
+
+end_per_testcase(vhost_limit_after_node_renamed = Testcase, Config) ->
+ Config1 = ?config(save_config, Config),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase);
+end_per_testcase(Testcase, Config) ->
+ clear_all_connection_tracking_tables(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+clear_all_connection_tracking_tables(Config) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config,
+ rabbit_connection_tracking,
+ clear_tracked_connection_tables_for_this_node,
+ []).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+most_basic_single_node_connection_count(Config) ->
+ VHost = <<"/">>,
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+ [Conn] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+ close_connections([Conn]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+single_node_single_vhost_connection_count(Config) ->
+ VHost = <<"/">>,
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn1] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn2] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+
+ [Conn3] = open_connections(Config, [0]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn4] = open_connections(Config, [0]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ kill_connections([Conn4]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn5] = open_connections(Config, [0]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ close_connections([Conn2, Conn3, Conn5]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+single_node_multiple_vhosts_connection_count(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+
+ [Conn2] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ [Conn3] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ [Conn4] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(2, count_connections_in(Config, VHost1)),
+
+ kill_connections([Conn4]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [Conn5] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(2, count_connections_in(Config, VHost2)),
+
+ [Conn6] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(3, count_connections_in(Config, VHost2)),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+single_node_list_in_vhost(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, length(connections_in(Config, VHost1))),
+ ?assertEqual(0, length(connections_in(Config, VHost2))),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ [#tracked_connection{vhost = VHost1}] = connections_in(Config, VHost1),
+ close_connections([Conn1]),
+ ?assertEqual(0, length(connections_in(Config, VHost1))),
+
+ [Conn2] = open_connections(Config, [{0, VHost2}]),
+ [#tracked_connection{vhost = VHost2}] = connections_in(Config, VHost2),
+
+ [Conn3] = open_connections(Config, [{0, VHost1}]),
+ [#tracked_connection{vhost = VHost1}] = connections_in(Config, VHost1),
+
+ [Conn4] = open_connections(Config, [{0, VHost1}]),
+ kill_connections([Conn4]),
+ [#tracked_connection{vhost = VHost1}] = connections_in(Config, VHost1),
+
+ [Conn5, Conn6] = open_connections(Config, [{0, VHost2}, {0, VHost2}]),
+ [<<"vhost1">>, <<"vhost2">>] =
+ lists:usort(lists:map(fun (#tracked_connection{vhost = V}) -> V end,
+ all_connections(Config))),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ ?assertEqual(0, length(all_connections(Config))),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+most_basic_cluster_connection_count(Config) ->
+ VHost = <<"/">>,
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+ [Conn1] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+
+ [Conn2] = open_connections(Config, [1]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn3] = open_connections(Config, [1]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ close_connections([Conn1, Conn2, Conn3]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+cluster_single_vhost_connection_count(Config) ->
+ VHost = <<"/">>,
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn1] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn2] = open_connections(Config, [1]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+
+ [Conn3] = open_connections(Config, [0]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn4] = open_connections(Config, [1]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ kill_connections([Conn4]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn5] = open_connections(Config, [1]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ close_connections([Conn2, Conn3, Conn5]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+cluster_multiple_vhosts_connection_count(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+
+ [Conn2] = open_connections(Config, [{1, VHost2}]),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ [Conn3] = open_connections(Config, [{1, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ [Conn4] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(2, count_connections_in(Config, VHost1)),
+
+ kill_connections([Conn4]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [Conn5] = open_connections(Config, [{1, VHost2}]),
+ ?assertEqual(2, count_connections_in(Config, VHost2)),
+
+ [Conn6] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(3, count_connections_in(Config, VHost2)),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+cluster_node_restart_connection_count(Config) ->
+ VHost = <<"/">>,
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn1] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn2] = open_connections(Config, [1]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+
+ [Conn3] = open_connections(Config, [0]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn4] = open_connections(Config, [1]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ [Conn5] = open_connections(Config, [1]),
+ ?assertEqual(4, count_connections_in(Config, VHost)),
+
+ rabbit_ct_broker_helpers:restart_broker(Config, 1),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+cluster_node_list_on_node(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ ?assertEqual(0, length(all_connections(Config))),
+ ?assertEqual(0, length(connections_on_node(Config, 0))),
+
+ [Conn1] = open_connections(Config, [0]),
+ [#tracked_connection{node = A}] = connections_on_node(Config, 0),
+ close_connections([Conn1]),
+ ?assertEqual(0, length(connections_on_node(Config, 0))),
+
+ [_Conn2] = open_connections(Config, [1]),
+ [#tracked_connection{node = B}] = connections_on_node(Config, 1),
+
+ [Conn3] = open_connections(Config, [0]),
+ ?assertEqual(1, length(connections_on_node(Config, 0))),
+
+ [Conn4] = open_connections(Config, [1]),
+ ?assertEqual(2, length(connections_on_node(Config, 1))),
+
+ kill_connections([Conn4]),
+ ?assertEqual(1, length(connections_on_node(Config, 1))),
+
+ [Conn5] = open_connections(Config, [0]),
+ ?assertEqual(2, length(connections_on_node(Config, 0))),
+
+ rabbit_ct_broker_helpers:stop_broker(Config, 1),
+ await_running_node_refresh(Config, 0),
+
+ ?assertEqual(2, length(all_connections(Config))),
+ ?assertEqual(0, length(connections_on_node(Config, 0, B))),
+
+ close_connections([Conn3, Conn5]),
+ ?assertEqual(0, length(all_connections(Config, 0))),
+
+ rabbit_ct_broker_helpers:start_broker(Config, 1).
+
+single_node_single_vhost_limit(Config) ->
+ single_node_single_vhost_limit_with(Config, 5),
+ single_node_single_vhost_limit_with(Config, -1).
+
+single_node_single_vhost_limit_with(Config, WatermarkLimit) ->
+ VHost = <<"/">>,
+ set_vhost_connection_limit(Config, VHost, 3),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn1, Conn2, Conn3] = open_connections(Config, [0, 0, 0]),
+
+    %% the limit has been reached, so new connection attempts are rejected
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+
+ set_vhost_connection_limit(Config, VHost, WatermarkLimit),
+ [Conn4, Conn5] = open_connections(Config, [0, 0]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4, Conn5]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ set_vhost_connection_limit(Config, VHost, -1).
+
+single_node_single_vhost_zero_limit(Config) ->
+ VHost = <<"/">>,
+ set_vhost_connection_limit(Config, VHost, 0),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config),
+ expect_that_client_connection_is_rejected(Config),
+ expect_that_client_connection_is_rejected(Config),
+
+ set_vhost_connection_limit(Config, VHost, -1),
+ [Conn1, Conn2] = open_connections(Config, [0, 0]),
+
+ close_connections([Conn1, Conn2]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+
+single_node_multiple_vhosts_limit(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ set_vhost_connection_limit(Config, VHost1, 2),
+ set_vhost_connection_limit(Config, VHost2, 2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1, Conn2, Conn3, Conn4] = open_connections(Config, [
+ {0, VHost1},
+ {0, VHost1},
+ {0, VHost2},
+ {0, VHost2}]),
+
+    %% the limit has been reached, so new connection attempts are rejected
+ expect_that_client_connection_is_rejected(Config, 0, VHost1),
+ expect_that_client_connection_is_rejected(Config, 0, VHost2),
+
+ [Conn5] = open_connections(Config, [0]),
+
+ set_vhost_connection_limit(Config, VHost1, 5),
+ set_vhost_connection_limit(Config, VHost2, -10),
+
+ [Conn6, Conn7, Conn8, Conn9, Conn10] = open_connections(Config, [
+ {0, VHost1},
+ {0, VHost1},
+ {0, VHost1},
+ {0, VHost2},
+ {0, VHost2}]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4, Conn5,
+ Conn6, Conn7, Conn8, Conn9, Conn10]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ set_vhost_connection_limit(Config, VHost1, -1),
+ set_vhost_connection_limit(Config, VHost2, -1),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+
+single_node_multiple_vhosts_zero_limit(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ set_vhost_connection_limit(Config, VHost1, 0),
+ set_vhost_connection_limit(Config, VHost2, 0),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, VHost1),
+ expect_that_client_connection_is_rejected(Config, 0, VHost2),
+ expect_that_client_connection_is_rejected(Config, 0, VHost1),
+
+ set_vhost_connection_limit(Config, VHost1, -1),
+ [Conn1, Conn2] = open_connections(Config, [{0, VHost1}, {0, VHost1}]),
+
+ close_connections([Conn1, Conn2]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ set_vhost_connection_limit(Config, VHost1, -1),
+ set_vhost_connection_limit(Config, VHost2, -1).
+
+
+cluster_single_vhost_limit(Config) ->
+ VHost = <<"/">>,
+ set_vhost_connection_limit(Config, VHost, 2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ %% here connections are opened to different nodes
+ [Conn1, Conn2] = open_connections(Config, [{0, VHost}, {1, VHost}]),
+
+    %% the limit has been reached, so new connection attempts are rejected
+ expect_that_client_connection_is_rejected(Config, 0, VHost),
+ expect_that_client_connection_is_rejected(Config, 1, VHost),
+
+ set_vhost_connection_limit(Config, VHost, 5),
+
+ [Conn3, Conn4] = open_connections(Config, [{0, VHost}, {0, VHost}]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ set_vhost_connection_limit(Config, VHost, -1).
+
+cluster_single_vhost_limit2(Config) ->
+ VHost = <<"/">>,
+ set_vhost_connection_limit(Config, VHost, 2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ %% here a limit is reached on one node first
+ [Conn1, Conn2] = open_connections(Config, [{0, VHost}, {0, VHost}]),
+
+    %% the limit has been reached, so new connection attempts are rejected
+ expect_that_client_connection_is_rejected(Config, 0, VHost),
+ expect_that_client_connection_is_rejected(Config, 1, VHost),
+
+ set_vhost_connection_limit(Config, VHost, 5),
+
+ [Conn3, Conn4, Conn5, {error, not_allowed}] = open_connections(Config, [
+ {1, VHost},
+ {1, VHost},
+ {1, VHost},
+ {1, VHost}]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4, Conn5]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ set_vhost_connection_limit(Config, VHost, -1).
+
+
+cluster_single_vhost_zero_limit(Config) ->
+ VHost = <<"/">>,
+ set_vhost_connection_limit(Config, VHost, 0),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 1),
+ expect_that_client_connection_is_rejected(Config, 0),
+
+ set_vhost_connection_limit(Config, VHost, -1),
+ [Conn1, Conn2, Conn3, Conn4] = open_connections(Config, [0, 1, 0, 1]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ set_vhost_connection_limit(Config, VHost, -1).
+
+
+cluster_multiple_vhosts_zero_limit(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ set_vhost_connection_limit(Config, VHost1, 0),
+ set_vhost_connection_limit(Config, VHost2, 0),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, VHost1),
+ expect_that_client_connection_is_rejected(Config, 0, VHost2),
+ expect_that_client_connection_is_rejected(Config, 1, VHost1),
+ expect_that_client_connection_is_rejected(Config, 1, VHost2),
+
+ set_vhost_connection_limit(Config, VHost1, -1),
+ set_vhost_connection_limit(Config, VHost2, -1),
+
+ [Conn1, Conn2, Conn3, Conn4] = open_connections(Config, [
+ {0, VHost1},
+ {0, VHost2},
+ {1, VHost1},
+ {1, VHost2}]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ set_vhost_connection_limit(Config, VHost1, -1),
+ set_vhost_connection_limit(Config, VHost2, -1).
+
+vhost_limit_after_node_renamed(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ VHost = <<"/renaming_node">>,
+ set_up_vhost(Config, VHost),
+ set_vhost_connection_limit(Config, VHost, 2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn1, Conn2, {error, not_allowed}] = open_connections(Config,
+ [{0, VHost}, {1, VHost}, {0, VHost}]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+ close_connections([Conn1, Conn2]),
+
+ Config1 = cluster_rename_SUITE:stop_rename_start(Config, A, [A, 'new-A']),
+
+ ?assertEqual(0, count_connections_in(Config1, VHost)),
+
+ [Conn3, Conn4, {error, not_allowed}] = open_connections(Config,
+ [{0, VHost}, {1, VHost}, {0, VHost}]),
+ ?assertEqual(2, count_connections_in(Config1, VHost)),
+ close_connections([Conn3, Conn4]),
+
+ set_vhost_connection_limit(Config1, VHost, -1),
+ {save_config, Config1}.
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+open_connections(Config, NodesAndVHosts) ->
+    % Select the connection helper that matches the group's connection_type
+ OpenConnectionFun = case ?config(connection_type, Config) of
+ network -> open_unmanaged_connection;
+ direct -> open_unmanaged_connection_direct
+ end,
+ Conns = lists:map(fun
+ ({Node, VHost}) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node,
+ VHost);
+ (Node) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
+ end, NodesAndVHosts),
+ timer:sleep(500),
+ Conns.
+
+close_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ rabbit_ct_client_helpers:close_connection(Conn)
+ end, Conns),
+ timer:sleep(500).
+
+kill_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ (catch exit(Conn, please_terminate))
+ end, Conns),
+ timer:sleep(500).
+
+count_connections_in(Config, VHost) ->
+ count_connections_in(Config, VHost, 0).
+count_connections_in(Config, VHost, NodeIndex) ->
+ timer:sleep(200),
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ count_tracked_items_in, [{vhost, VHost}]).
+
+connections_in(Config, VHost) ->
+ connections_in(Config, 0, VHost).
+connections_in(Config, NodeIndex, VHost) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list, [VHost]).
+
+connections_on_node(Config) ->
+ connections_on_node(Config, 0).
+connections_on_node(Config, NodeIndex) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, NodeIndex, nodename),
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list_on_node, [Node]).
+connections_on_node(Config, NodeIndex, NodeForListing) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list_on_node, [NodeForListing]).
+
+all_connections(Config) ->
+ all_connections(Config, 0).
+all_connections(Config, NodeIndex) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list, []).
+
+set_up_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost),
+ set_vhost_connection_limit(Config, VHost, -1).
+
+set_vhost_connection_limit(Config, VHost, Count) ->
+ set_vhost_connection_limit(Config, 0, VHost, Count).
+
+set_vhost_connection_limit(Config, NodeIndex, VHost, Count) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_vhost_limits, Node,
+ ["{\"max-connections\": " ++ integer_to_list(Count) ++ "}"],
+ [{"-p", binary_to_list(VHost)}]).
+
+await_running_node_refresh(_Config, _NodeIndex) ->
+ timer:sleep(250).
+
+expect_that_client_connection_is_rejected(Config) ->
+ expect_that_client_connection_is_rejected(Config, 0).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex, VHost) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex, VHost).
diff --git a/deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl b/deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl
new file mode 100644
index 0000000000..2748d95592
--- /dev/null
+++ b/deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl
@@ -0,0 +1,150 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_vhost_connection_limit_partitions_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-import(rabbit_ct_client_helpers, [open_unmanaged_connection/2,
+ open_unmanaged_connection/3]).
+
+
+all() ->
+ [
+ {group, net_ticktime_1}
+ ].
+
+groups() ->
+ [
+ {net_ticktime_1, [], [
+ cluster_full_partition_with_autoheal
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% see partitions_SUITE
+-define(DELAY, 12000).
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config, [
+ fun rabbit_ct_broker_helpers:configure_dist_proxy/1
+ ]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(net_ticktime_1 = Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{net_ticktime, 1}]),
+ init_per_multinode_group(Group, Config1, 3).
+
+init_per_multinode_group(_Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+cluster_full_partition_with_autoheal(Config) ->
+ VHost = <<"/">>,
+ rabbit_ct_broker_helpers:set_partition_handling_mode_globally(Config, autoheal),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ %% 6 connections, 2 per node
+ Conn1 = open_unmanaged_connection(Config, A),
+ Conn2 = open_unmanaged_connection(Config, A),
+ Conn3 = open_unmanaged_connection(Config, B),
+ Conn4 = open_unmanaged_connection(Config, B),
+ Conn5 = open_unmanaged_connection(Config, C),
+ Conn6 = open_unmanaged_connection(Config, C),
+ wait_for_count_connections_in(Config, VHost, 6, 60000),
+
+ %% B drops off the network, non-reachable by either A or C
+ rabbit_ct_broker_helpers:block_traffic_between(A, B),
+ rabbit_ct_broker_helpers:block_traffic_between(B, C),
+ timer:sleep(?DELAY),
+
+ %% A and C are still connected, so 4 connections are tracked
+ wait_for_count_connections_in(Config, VHost, 4, 60000),
+
+ rabbit_ct_broker_helpers:allow_traffic_between(A, B),
+ rabbit_ct_broker_helpers:allow_traffic_between(B, C),
+ timer:sleep(?DELAY),
+
+ %% during autoheal B's connections were dropped
+ wait_for_count_connections_in(Config, VHost, 4, 60000),
+
+ lists:foreach(fun (Conn) ->
+ (catch rabbit_ct_client_helpers:close_connection(Conn))
+ end, [Conn1, Conn2, Conn3, Conn4,
+ Conn5, Conn6]),
+
+ passed.
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
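+%% Polls the tracked connection count roughly every 3 seconds until it equals
+%% Expected or the Time budget (in milliseconds) is spent; the Time =< 0
+%% clause turns the final check into an assertion so a mismatch fails the test.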
+wait_for_count_connections_in(Config, VHost, Expected, Time) when Time =< 0 ->
+ ?assertMatch(Connections when length(Connections) == Expected,
+ connections_in(Config, VHost));
+wait_for_count_connections_in(Config, VHost, Expected, Time) ->
+ case connections_in(Config, VHost) of
+ Connections when length(Connections) == Expected ->
+ ok;
+ _ ->
+ Sleep = 3000,
+ timer:sleep(Sleep),
+ wait_for_count_connections_in(Config, VHost, Expected, Time - Sleep)
+ end.
+
+count_connections_in(Config, VHost) ->
+ count_connections_in(Config, VHost, 0).
+count_connections_in(Config, VHost, NodeIndex) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ count_tracked_items_in, [{vhost, VHost}]).
+
+connections_in(Config, VHost) ->
+ connections_in(Config, 0, VHost).
+connections_in(Config, NodeIndex, VHost) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list, [VHost]).
diff --git a/deps/rabbit/test/per_vhost_msg_store_SUITE.erl b/deps/rabbit/test/per_vhost_msg_store_SUITE.erl
new file mode 100644
index 0000000000..8364d69462
--- /dev/null
+++ b/deps/rabbit/test/per_vhost_msg_store_SUITE.erl
@@ -0,0 +1,245 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_vhost_msg_store_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(MSGS_COUNT, 100).
+
+all() ->
+ [
+ publish_to_different_dirs,
+ storage_deleted_on_vhost_delete,
+ single_vhost_storage_delete_is_safe
+ ].
+
+
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodename_suffix, ?MODULE}]),
+ Config2 = rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit, [{queue_index_embed_msgs_below, 100}]}),
+ rabbit_ct_helpers:run_setup_steps(
+ Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(_, Config) ->
+ Vhost1 = <<"vhost1">>,
+ Vhost2 = <<"vhost2">>,
+ rabbit_ct_broker_helpers:add_vhost(Config, Vhost1),
+ rabbit_ct_broker_helpers:add_vhost(Config, Vhost2),
+ Chan1 = open_channel(Vhost1, Config),
+ Chan2 = open_channel(Vhost2, Config),
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{vhost1, Vhost1}, {vhost2, Vhost2},
+ {channel1, Chan1}, {channel2, Chan2}]).
+
+end_per_testcase(single_vhost_storage_delete_is_safe, Config) ->
+ Config;
+end_per_testcase(_, Config) ->
+ Vhost1 = ?config(vhost1, Config),
+ Vhost2 = ?config(vhost2, Config),
+ rabbit_ct_broker_helpers:delete_vhost(Config, Vhost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, Vhost2),
+ Config.
+
+publish_to_different_dirs(Config) ->
+ Vhost1 = ?config(vhost1, Config),
+ Vhost2 = ?config(vhost2, Config),
+ Channel1 = ?config(channel1, Config),
+ Channel2 = ?config(channel2, Config),
+ Queue1 = declare_durable_queue(Channel1),
+ Queue2 = declare_durable_queue(Channel2),
+ FolderSize1 = get_folder_size(Vhost1, Config),
+ FolderSize2 = get_folder_size(Vhost2, Config),
+
+    %% Publish messages to the queue index
+ publish_persistent_messages(index, Channel1, Queue1),
+ %% First storage increased
+ FolderSize11 = get_folder_size(Vhost1, Config),
+ true = (FolderSize1 < FolderSize11),
+    %% Second storage didn't increase
+ FolderSize2 = get_folder_size(Vhost2, Config),
+
+    %% Publish messages to the message store
+ publish_persistent_messages(store, Channel1, Queue1),
+ %% First storage increased
+ FolderSize12 = get_folder_size(Vhost1, Config),
+ true = (FolderSize11 < FolderSize12),
+    %% Second storage didn't increase
+ FolderSize2 = get_folder_size(Vhost2, Config),
+
+    %% Publish messages to the queue index
+ publish_persistent_messages(index, Channel2, Queue2),
+    %% Second storage increased
+ FolderSize21 = get_folder_size(Vhost2, Config),
+ true = (FolderSize2 < FolderSize21),
+    %% First storage didn't increase
+ FolderSize12 = get_folder_size(Vhost1, Config),
+
+    %% Publish messages to the message store
+ publish_persistent_messages(store, Channel2, Queue2),
+ %% Second storage increased
+ FolderSize22 = get_folder_size(Vhost2, Config),
+ true = (FolderSize21 < FolderSize22),
+    %% First storage didn't increase
+ FolderSize12 = get_folder_size(Vhost1, Config).
+
+storage_deleted_on_vhost_delete(Config) ->
+ Vhost1 = ?config(vhost1, Config),
+ Channel1 = ?config(channel1, Config),
+ Queue1 = declare_durable_queue(Channel1),
+ FolderSize = get_global_folder_size(Config),
+
+ publish_persistent_messages(index, Channel1, Queue1),
+ publish_persistent_messages(store, Channel1, Queue1),
+ FolderSizeAfterPublish = get_global_folder_size(Config),
+
+ %% Total storage size increased
+ true = (FolderSize < FolderSizeAfterPublish),
+
+ ok = rabbit_ct_broker_helpers:delete_vhost(Config, Vhost1),
+
+    %% Total storage size reduced
+ FolderSizeAfterDelete = get_global_folder_size(Config),
+ true = (FolderSizeAfterPublish > FolderSizeAfterDelete),
+
+ %% There is no Vhost1 folder
+ 0 = get_folder_size(Vhost1, Config).
+
+
+single_vhost_storage_delete_is_safe(Config) ->
+    ct:pal("Start test 3", []),
+ Vhost1 = ?config(vhost1, Config),
+ Vhost2 = ?config(vhost2, Config),
+ Channel1 = ?config(channel1, Config),
+ Channel2 = ?config(channel2, Config),
+ Queue1 = declare_durable_queue(Channel1),
+ Queue2 = declare_durable_queue(Channel2),
+
+ %% Publish messages to both stores
+ publish_persistent_messages(index, Channel1, Queue1),
+ publish_persistent_messages(store, Channel1, Queue1),
+ publish_persistent_messages(index, Channel2, Queue2),
+ publish_persistent_messages(store, Channel2, Queue2),
+
+ queue_is_not_empty(Channel2, Queue2),
+ rabbit_ct_broker_helpers:stop_broker(Config, 0),
+ delete_vhost_data(Vhost1, Config),
+ rabbit_ct_broker_helpers:start_broker(Config, 0),
+
+ Channel11 = open_channel(Vhost1, Config),
+ Channel21 = open_channel(Vhost2, Config),
+
+ %% There are no Vhost1 messages
+ queue_is_empty(Channel11, Queue1),
+
+ %% But Vhost2 messages are in place
+ queue_is_not_empty(Channel21, Queue2),
+ consume_messages(index, Channel21, Queue2),
+ consume_messages(store, Channel21, Queue2).
+
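+%% Declares a durable queue with a unique, reference-derived name so that
+%% repeated calls never collide with an existing queue.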
+declare_durable_queue(Channel) ->
+ QName = list_to_binary(erlang:ref_to_list(make_ref())),
+ #'queue.declare_ok'{queue = QName} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{queue = QName, durable = true}),
+ QName.
+
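+%% The suite sets queue_index_embed_msgs_below to 100, so the 50-byte payloads
+%% (Storage = index) are embedded in the queue index while the 150-byte
+%% payloads (Storage = store) go to the per-vhost message store. Waiting for
+%% publisher confirms ensures publishing has completed before the caller
+%% measures folder sizes.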
+publish_persistent_messages(Storage, Channel, Queue) ->
+ MessagePayload = case Storage of
+ index -> binary:copy(<<"=">>, 50);
+ store -> binary:copy(<<"-">>, 150)
+ end,
+ amqp_channel:call(Channel, #'confirm.select'{}),
+ [amqp_channel:call(Channel,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = MessagePayload})
+ || _ <- lists:seq(1, ?MSGS_COUNT)],
+ amqp_channel:wait_for_confirms(Channel).
+
+
+get_folder_size(Vhost, Config) ->
+ Dir = vhost_dir(Vhost, Config),
+ folder_size(Dir).
+
+folder_size(Dir) ->
+ filelib:fold_files(Dir, ".*", true,
+ fun(F,Acc) -> filelib:file_size(F) + Acc end, 0).
+
+get_global_folder_size(Config) ->
+ BaseDir = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mnesia, dir, []),
+ folder_size(BaseDir).
+
+vhost_dir(Vhost, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_vhost, msg_store_dir_path, [Vhost]).
+
+delete_vhost_data(Vhost, Config) ->
+ Dir = vhost_dir(Vhost, Config),
+ rabbit_file:recursive_delete([Dir]).
+
+queue_is_empty(Channel, Queue) ->
+ #'queue.declare_ok'{queue = Queue, message_count = 0} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{ queue = Queue,
+ durable = true,
+ passive = true}).
+
+queue_is_not_empty(Channel, Queue) ->
+ #'queue.declare_ok'{queue = Queue, message_count = MsgCount} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{ queue = Queue,
+ durable = true,
+ passive = true}),
+ ExpectedCount = ?MSGS_COUNT * 2,
+ ExpectedCount = MsgCount.
+
+consume_messages(Storage, Channel, Queue) ->
+ MessagePayload = case Storage of
+ index -> binary:copy(<<"=">>, 50);
+ store -> binary:copy(<<"-">>, 150)
+ end,
+ lists:foreach(
+ fun(I) ->
+ ct:pal("Consume message ~p~n ~p~n", [I, MessagePayload]),
+ {#'basic.get_ok'{}, Content} =
+ amqp_channel:call(Channel,
+ #'basic.get'{queue = Queue,
+ no_ack = true}),
+ #amqp_msg{payload = MessagePayload} = Content
+ end,
+ lists:seq(1, ?MSGS_COUNT)),
+ ok.
+
+open_channel(Vhost, Config) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, Conn} = amqp_connection:start(
+ #amqp_params_direct{node = Node, virtual_host = Vhost}),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Chan.
diff --git a/deps/rabbit/test/per_vhost_queue_limit_SUITE.erl b/deps/rabbit/test/per_vhost_queue_limit_SUITE.erl
new file mode 100644
index 0000000000..28a9f98537
--- /dev/null
+++ b/deps/rabbit/test/per_vhost_queue_limit_SUITE.erl
@@ -0,0 +1,682 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_vhost_queue_limit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-import(rabbit_ct_client_helpers, [open_unmanaged_connection/3,
+ close_connection_and_channel/2]).
+
+all() ->
+ [
+ {group, cluster_size_1}
+ , {group, cluster_size_2}
+ ].
+
+groups() ->
+ [
+ {cluster_size_1, [], [
+ most_basic_single_node_queue_count,
+ single_node_single_vhost_queue_count,
+ single_node_multiple_vhosts_queue_count,
+ single_node_single_vhost_limit,
+ single_node_single_vhost_zero_limit,
+ single_node_single_vhost_limit_with_durable_named_queue,
+ single_node_single_vhost_zero_limit_with_durable_named_queue,
+ single_node_single_vhost_limit_with_queue_ttl,
+ single_node_single_vhost_limit_with_redeclaration
+ ]},
+ {cluster_size_2, [], [
+ most_basic_cluster_queue_count,
+ cluster_multiple_vhosts_queue_count,
+ cluster_multiple_vhosts_limit,
+ cluster_multiple_vhosts_zero_limit,
+ cluster_multiple_vhosts_limit_with_durable_named_queue,
+ cluster_multiple_vhosts_zero_limit_with_durable_named_queue,
+ cluster_node_restart_queue_count
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1, Config) ->
+ init_per_multinode_group(cluster_size_1, Config, 1);
+init_per_group(cluster_size_2, Config) ->
+ init_per_multinode_group(cluster_size_2, Config, 2);
+init_per_group(cluster_rename, Config) ->
+ init_per_multinode_group(cluster_rename, Config, 2).
+
+init_per_multinode_group(Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ case Group of
+ cluster_rename ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config1;
+ _ ->
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps())
+ end.
+
+end_per_group(cluster_rename, Config) ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config;
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(vhost_limit_after_node_renamed = Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config.
+
+end_per_testcase(vhost_limit_after_node_renamed = Testcase, Config) ->
+ Config1 = ?config(save_config, Config),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase);
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+most_basic_single_node_queue_count(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+ Conn = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ declare_exclusive_queues(Ch, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost)),
+ close_connection_and_channel(Conn, Ch),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+single_node_single_vhost_queue_count(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+ Conn = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ declare_exclusive_queues(Ch, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost)),
+ declare_durable_queues(Ch, 10),
+ ?assertEqual(20, count_queues_in(Config, VHost)),
+ delete_durable_queues(Ch, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost)),
+ close_connection_and_channel(Conn, Ch),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+single_node_multiple_vhosts_queue_count(Config) ->
+ VHost1 = <<"queue-limits1">>,
+ VHost2 = <<"queue-limits2">>,
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ Conn2 = open_unmanaged_connection(Config, 0, VHost2),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ declare_exclusive_queues(Ch1, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost1)),
+ declare_durable_queues(Ch1, 10),
+ ?assertEqual(20, count_queues_in(Config, VHost1)),
+ delete_durable_queues(Ch1, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost1)),
+ declare_exclusive_queues(Ch2, 30),
+ ?assertEqual(30, count_queues_in(Config, VHost2)),
+ close_connection_and_channel(Conn1, Ch1),
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ close_connection_and_channel(Conn2, Ch2),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+single_node_single_vhost_limit(Config) ->
+ single_node_single_vhost_limit_with(Config, 5),
+ single_node_single_vhost_limit_with(Config, 10).
+single_node_single_vhost_zero_limit(Config) ->
+ single_node_single_vhost_zero_limit_with(Config, #'queue.declare'{queue = <<"">>,
+ exclusive = true}).
+
+single_node_single_vhost_limit_with_durable_named_queue(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+
+ set_vhost_queue_limit(Config, VHost, 3),
+ Conn = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"Q1">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"Q2">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true}),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"Q4">>,
+ exclusive = false,
+ durable = true})
+ end),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+single_node_single_vhost_zero_limit_with_durable_named_queue(Config) ->
+ single_node_single_vhost_zero_limit_with(Config, #'queue.declare'{queue = <<"Q4">>,
+ exclusive = false,
+ durable = true}).
+
+single_node_single_vhost_limit_with(Config, WatermarkLimit) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+
+ set_vhost_queue_limit(Config, VHost, 3),
+ Conn = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ set_vhost_queue_limit(Config, VHost, WatermarkLimit),
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end, lists:seq(1, WatermarkLimit)),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+single_node_single_vhost_zero_limit_with(Config, QueueDeclare) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+
+ set_vhost_queue_limit(Config, VHost, 0),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, QueueDeclare)
+ end),
+
+ Conn2 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ %% lift the limit
+ set_vhost_queue_limit(Config, VHost, -1),
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end, lists:seq(1, 100)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+
+single_node_single_vhost_limit_with_queue_ttl(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+
+ set_vhost_queue_limit(Config, VHost, 3),
+
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"">>,
+ exclusive = true,
+ arguments = [{<<"x-expires">>, long, 2000}]})
+ end, lists:seq(1, 3)),
+
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end),
+
+ Conn2 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ %% wait for the queues to expire
+ timer:sleep(3000),
+
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"">>,
+ exclusive = true}),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+
+single_node_single_vhost_limit_with_redeclaration(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+
+ set_vhost_queue_limit(Config, VHost, 3),
+ Conn1 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q1">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q2">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true}),
+
+ %% can't declare a new queue...
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q4">>,
+ exclusive = false,
+ durable = true})
+ end),
+
+
+ Conn2 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ %% ...but re-declarations succeed
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q1">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q2">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true}),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q4">>,
+ exclusive = false,
+ durable = true})
+ end),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+
+most_basic_cluster_queue_count(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost, 1)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ declare_exclusive_queues(Ch1, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(10, count_queues_in(Config, VHost, 1)),
+
+ Conn2 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+ declare_exclusive_queues(Ch2, 15),
+ ?assertEqual(25, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(25, count_queues_in(Config, VHost, 1)),
+ close_connection_and_channel(Conn1, Ch1),
+ close_connection_and_channel(Conn2, Ch2),
+ ?assertEqual(0, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost, 1)),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+cluster_node_restart_queue_count(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost, 1)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ declare_exclusive_queues(Ch1, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(10, count_queues_in(Config, VHost, 1)),
+
+ rabbit_ct_broker_helpers:restart_broker(Config, 0),
+ ?assertEqual(0, count_queues_in(Config, VHost, 0)),
+
+ Conn2 = open_unmanaged_connection(Config, 1, VHost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+ declare_exclusive_queues(Ch2, 15),
+ ?assertEqual(15, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(15, count_queues_in(Config, VHost, 1)),
+
+ declare_durable_queues(Ch2, 10),
+ ?assertEqual(25, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(25, count_queues_in(Config, VHost, 1)),
+
+ rabbit_ct_broker_helpers:restart_broker(Config, 1),
+
+ ?assertEqual(10, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(10, count_queues_in(Config, VHost, 1)),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+
+cluster_multiple_vhosts_queue_count(Config) ->
+ VHost1 = <<"queue-limits1">>,
+ VHost2 = <<"queue-limits2">>,
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ declare_exclusive_queues(Ch1, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost1, 0)),
+ ?assertEqual(10, count_queues_in(Config, VHost1, 1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost2, 1)),
+
+ Conn2 = open_unmanaged_connection(Config, 0, VHost2),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+ declare_exclusive_queues(Ch2, 15),
+ ?assertEqual(15, count_queues_in(Config, VHost2, 0)),
+ ?assertEqual(15, count_queues_in(Config, VHost2, 1)),
+ close_connection_and_channel(Conn1, Ch1),
+ close_connection_and_channel(Conn2, Ch2),
+ ?assertEqual(0, count_queues_in(Config, VHost1, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost1, 1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost2, 1)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+cluster_multiple_vhosts_limit(Config) ->
+ cluster_multiple_vhosts_limit_with(Config, 10),
+ cluster_multiple_vhosts_limit_with(Config, 20).
+
+cluster_multiple_vhosts_limit_with(Config, WatermarkLimit) ->
+ VHost1 = <<"queue-limits1">>,
+ VHost2 = <<"queue-limits2">>,
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+
+ set_vhost_queue_limit(Config, VHost1, 3),
+ set_vhost_queue_limit(Config, VHost2, 3),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ set_vhost_queue_limit(Config, VHost1, WatermarkLimit),
+
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end, lists:seq(1, WatermarkLimit)),
+
+ Conn2 = open_unmanaged_connection(Config, 1, VHost2),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+ set_vhost_queue_limit(Config, VHost2, WatermarkLimit),
+
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end, lists:seq(1, WatermarkLimit)),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end),
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+
+cluster_multiple_vhosts_zero_limit(Config) ->
+ cluster_multiple_vhosts_zero_limit_with(Config, #'queue.declare'{queue = <<"">>,
+ exclusive = true}).
+
+cluster_multiple_vhosts_limit_with_durable_named_queue(Config) ->
+ VHost1 = <<"queue-limits1">>,
+ VHost2 = <<"queue-limits2">>,
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+
+ set_vhost_queue_limit(Config, VHost1, 3),
+ set_vhost_queue_limit(Config, VHost2, 3),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+
+ Conn2 = open_unmanaged_connection(Config, 1, VHost2),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q1">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q2">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true}),
+
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q1">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q2">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true}),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true})
+ end),
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true})
+ end),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+cluster_multiple_vhosts_zero_limit_with_durable_named_queue(Config) ->
+ cluster_multiple_vhosts_zero_limit_with(Config, #'queue.declare'{queue = <<"Q4">>,
+ exclusive = false,
+ durable = true}).
+
+cluster_multiple_vhosts_zero_limit_with(Config, QueueDeclare) ->
+ VHost1 = <<"queue-limits1">>,
+ VHost2 = <<"queue-limits2">>,
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ Conn2 = open_unmanaged_connection(Config, 1, VHost2),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ set_vhost_queue_limit(Config, VHost1, 0),
+ set_vhost_queue_limit(Config, VHost2, 0),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, QueueDeclare)
+ end),
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch2, QueueDeclare)
+ end),
+
+
+ Conn3 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch3} = amqp_connection:open_channel(Conn3),
+ Conn4 = open_unmanaged_connection(Config, 1, VHost2),
+ {ok, Ch4} = amqp_connection:open_channel(Conn4),
+
+    %% lift the limits (-1 means no limit)
+ set_vhost_queue_limit(Config, VHost1, -1),
+ set_vhost_queue_limit(Config, VHost2, -1),
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch3, #'queue.declare'{queue = <<"">>,
+ exclusive = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch4, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end, lists:seq(1, 400)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+set_up_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost),
+ set_vhost_queue_limit(Config, VHost, -1).
+
+set_vhost_queue_limit(Config, VHost, Count) ->
+ set_vhost_queue_limit(Config, 0, VHost, Count).
+
+set_vhost_queue_limit(Config, NodeIndex, VHost, Count) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ rabbit_ct_broker_helpers:control_action(
+ set_vhost_limits, Node,
+ ["{\"max-queues\": " ++ integer_to_list(Count) ++ "}"],
+ [{"-p", binary_to_list(VHost)}]).
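+%% For reference, the helper above is the ctl equivalent of, e.g. (illustrative
+%% invocation only):
+%%   rabbitmqctl set_vhost_limits -p my-vhost '{"max-queues": 100}'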
+
+count_queues_in(Config, VHost) ->
+ count_queues_in(Config, VHost, 0).
+count_queues_in(Config, VHost, NodeIndex) ->
+    %% Brief pause so queue record changes have a chance to propagate across
+    %% the cluster before counting on the target node.
+    timer:sleep(200),
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_amqqueue,
+ count, [VHost]).
+
+declare_exclusive_queues(Ch, N) ->
+ lists:foreach(fun (_) ->
+ amqp_channel:call(Ch,
+ #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end,
+ lists:seq(1, N)).
+
+declare_durable_queues(Ch, N) ->
+ lists:foreach(fun (I) ->
+ amqp_channel:call(Ch,
+ #'queue.declare'{queue = durable_queue_name(I),
+ exclusive = false,
+ durable = true})
+ end,
+ lists:seq(1, N)).
+
+delete_durable_queues(Ch, N) ->
+ lists:foreach(fun (I) ->
+ amqp_channel:call(Ch,
+ #'queue.delete'{queue = durable_queue_name(I)})
+ end,
+ lists:seq(1, N)).
+
+durable_queue_name(N) when is_integer(N) ->
+ iolist_to_binary(io_lib:format("queue-limits-durable-~p", [N])).
+
+expect_shutdown_due_to_precondition_failed(Thunk) ->
+ try
+ Thunk(),
+ ok
+ catch _:{{shutdown, {server_initiated_close, 406, _}}, _} ->
+ %% expected
+ ok
+ end.
diff --git a/deps/rabbit/test/policy_SUITE.erl b/deps/rabbit/test/policy_SUITE.erl
new file mode 100644
index 0000000000..ce68332d77
--- /dev/null
+++ b/deps/rabbit/test/policy_SUITE.erl
@@ -0,0 +1,204 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(policy_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_2}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ policy_ttl,
+ operator_policy_ttl,
+ operator_retroactive_policy_ttl,
+ operator_retroactive_policy_publish_ttl
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_client_helpers:setup_steps(),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_client_helpers:teardown_steps(),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+policy_ttl(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"policy_ttl-queue">>,
+ rabbit_ct_broker_helpers:set_policy(Config, 0, <<"ttl-policy">>,
+ <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 20}]),
+
+ declare(Ch, Q),
+ publish(Ch, Q, lists:seq(1, 20)),
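+    %% The policy sets message-ttl to 20 ms, so after 50 ms every published
+    %% message should have expired.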
+ timer:sleep(50),
+ get_empty(Ch, Q),
+ delete(Ch, Q),
+
+ rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ttl-policy">>),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+operator_policy_ttl(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"policy_ttl-queue">>,
+ % Operator policy will override
+ rabbit_ct_broker_helpers:set_policy(Config, 0, <<"ttl-policy">>,
+ <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 100000}]),
+ rabbit_ct_broker_helpers:set_operator_policy(Config, 0, <<"ttl-policy-op">>,
+ <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 1}]),
+
+ declare(Ch, Q),
+ publish(Ch, Q, lists:seq(1, 50)),
+ timer:sleep(50),
+ get_empty(Ch, Q),
+ delete(Ch, Q),
+
+ rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ttl-policy">>),
+ rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"ttl-policy-op">>),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+operator_retroactive_policy_ttl(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"policy_ttl-queue">>,
+ declare(Ch, Q),
+ publish(Ch, Q, lists:seq(1, 50)),
+ % Operator policy will override
+ rabbit_ct_broker_helpers:set_operator_policy(Config, 0, <<"ttl-policy-op">>,
+ <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 1}]),
+
+ %% Old messages are not expired
+ timer:sleep(50),
+ get_messages(50, Ch, Q),
+ delete(Ch, Q),
+
+ rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"ttl-policy-op">>),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+operator_retroactive_policy_publish_ttl(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"policy_ttl-queue">>,
+ declare(Ch, Q),
+ publish(Ch, Q, lists:seq(1, 50)),
+ % Operator policy will override
+ rabbit_ct_broker_helpers:set_operator_policy(Config, 0, <<"ttl-policy-op">>,
+ <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 1}]),
+
+    %% Old messages are not expired; new ones only expire once they reach
+    %% the head of the queue.
+ publish(Ch, Q, lists:seq(1, 25)),
+ timer:sleep(50),
+ [[<<"policy_ttl-queue">>, <<"75">>]] =
+ rabbit_ct_broker_helpers:rabbitmqctl_list(Config, 0, ["list_queues", "--no-table-headers"]),
+ get_messages(50, Ch, Q),
+ delete(Ch, Q),
+
+ rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"ttl-policy-op">>),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+%%----------------------------------------------------------------------------
+
+
+declare(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true}).
+
+delete(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+publish(Ch, Q, Ps) ->
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ [publish1(Ch, Q, P) || P <- Ps],
+ amqp_channel:wait_for_confirms(Ch).
+
+publish1(Ch, Q, P) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = erlang:md5(term_to_binary(P))}).
+
+publish1(Ch, Q, P, Pd) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = Pd}).
+
+props(undefined) -> #'P_basic'{delivery_mode = 2};
+props(P) -> #'P_basic'{priority = P,
+ delivery_mode = 2}.
+
+consume(Ch, Q, Ack) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = Ack =:= no_ack,
+ consumer_tag = <<"ctag">>},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+get_empty(Ch, Q) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+get_messages(0, Ch, Q) ->
+ get_empty(Ch, Q);
+get_messages(Number, Ch, Q) ->
+ case amqp_channel:call(Ch, #'basic.get'{queue = Q}) of
+ {#'basic.get_ok'{}, _} ->
+ get_messages(Number - 1, Ch, Q);
+ #'basic.get_empty'{} ->
+ exit(failed)
+ end.
+
+%%----------------------------------------------------------------------------
diff --git a/deps/rabbit/test/priority_queue_SUITE.erl b/deps/rabbit/test/priority_queue_SUITE.erl
new file mode 100644
index 0000000000..a0c1732ffd
--- /dev/null
+++ b/deps/rabbit/test/priority_queue_SUITE.erl
@@ -0,0 +1,771 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(priority_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ ackfold,
+ drop,
+ {overflow_reject_publish, [], [reject]},
+ {overflow_reject_publish_dlx, [], [reject]},
+ dropwhile_fetchwhile,
+ info_head_message_timestamp,
+ matching,
+ mirror_queue_sync,
+ mirror_queue_sync_priority_above_max,
+ mirror_queue_sync_priority_above_max_pending_ack,
+ mirror_queue_sync_order,
+ purge,
+ requeue,
+ resume,
+ simple_order,
+ straight_through,
+ invoke,
+ gen_server2_stats,
+ negative_max_priorities,
+ max_priorities_above_hard_limit
+ ]},
+ {cluster_size_3, [], [
+ mirror_queue_auto_ack,
+ mirror_fast_reset_policy,
+ mirror_reset_policy,
+ mirror_stop_pending_followers
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_group(cluster_size_3, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_group(overflow_reject_publish, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish">>}
+ ]);
+init_per_group(overflow_reject_publish_dlx, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish-dlx">>}
+ ]).
+
+end_per_group(overflow_reject_publish, _Config) ->
+ ok;
+end_per_group(overflow_reject_publish_dlx, _Config) ->
+ ok;
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_client_helpers:setup_steps(),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_client_helpers:teardown_steps(),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% The BQ API is used in all sorts of places in all sorts of
+%% ways. Therefore we have to jump through a few different hoops
+%% in order to integration-test it.
+%%
+%% * start/1, stop/0, init/3, terminate/2, delete_and_terminate/2
+%% - starting and stopping rabbit. durable queues / persistent msgs needed
+%% to test recovery
+%%
+%% * publish/5, drain_confirmed/1, fetch/2, ack/2, is_duplicate/2, msg_rates/1,
+%% needs_timeout/1, timeout/1, invoke/3, resume/1 [0]
+%% - regular publishing and consuming, with confirms and acks and durability
+%%
+%% * publish_delivered/4 - publish with acks straight through
+%% * discard/3 - publish without acks straight through
+%% * dropwhile/2 - expire messages without DLX
+%% * fetchwhile/4 - expire messages with DLX
+%% * ackfold/4 - reject messages with DLX
+%% * requeue/2 - reject messages without DLX
+%% * drop/2 - maxlen messages without DLX
+%% * purge/1 - issue AMQP queue.purge
+%% * purge_acks/1 - mirror queue explicit sync with unacked msgs
+%% * fold/3 - mirror queue explicit sync
+%% * depth/1 - mirror queue implicit sync detection
+%% * len/1, is_empty/1 - info items
+%% * handle_pre_hibernate/1 - hibernation
+%%
+%% * set_ram_duration_target/2, ram_duration/1, status/1
+%% - maybe need unit testing?
+%%
+%% [0] publish enough to get credit flow from msg store
+
+simple_order(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"simple_order-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+ get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+ publish(Ch, Q, [2, 3, 1, 2, 3, 1, 2, 3, 1]),
+ get_all(Ch, Q, no_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+ publish(Ch, Q, [3, 1, 2, 3, 1, 2, 3, 1, 2]),
+ get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+matching(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"matching-queue">>,
+ declare(Ch, Q, 5),
+ %% We round priority down, and 0 is the default
+ publish(Ch, Q, [undefined, 0, 5, 10, undefined]),
+ get_all(Ch, Q, do_ack, [5, 10, undefined, 0, undefined]),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+resume(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"resume-queue">>,
+ declare(Ch, Q, 5),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ publish_many(Ch, Q, 10000),
+ amqp_channel:wait_for_confirms(Ch),
+ amqp_channel:call(Ch, #'queue.purge'{queue = Q}), %% Assert it exists
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+straight_through(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"straight_through-queue">>,
+ declare(Ch, Q, 3),
+ [begin
+ consume(Ch, Q, Ack),
+ [begin
+ publish1(Ch, Q, P),
+ assert_delivered(Ch, Ack, P)
+ end || P <- [1, 2, 3]],
+ cancel(Ch)
+ end || Ack <- [do_ack, no_ack]],
+ get_empty(Ch, Q),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+max_priorities_above_hard_limit(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"max_priorities_above_hard_limit">>,
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+      %% Note that lower values than the 3000 used here (e.g. 300) already
+      %% overflow the byte type. However, values >= 256 would still be
+      %% rejected when used by other clients.
+ declare(Ch, Q, 3000)),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+negative_max_priorities(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"negative_max_priorities">>,
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ declare(Ch, Q, -10)),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+
+invoke(Config) ->
+ %% Synthetic test to check the invoke callback, as the bug tested here
+ %% is only triggered with a race condition.
+ %% When mirroring is stopped, the backing queue of rabbit_amqqueue_process
+ %% changes from rabbit_mirror_queue_master to rabbit_priority_queue,
+    %% which shouldn't receive any invoke call. However, there might be
+    %% pending messages, so the priority queue receives the
+    %% `run_backing_queue` cast message sent to the old master.
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"invoke-queue">>,
+ declare(Ch, Q, 3),
+ Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ rabbit_ct_broker_helpers:rpc(
+ Config, A, gen_server, cast,
+ [Pid,
+ {run_backing_queue, ?MODULE, fun(_, _) -> ok end}]),
+ Pid2 = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ Pid = Pid2,
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+
+gen_server2_stats(Config) ->
+    %% Sanity check that gen_server2 metrics are collected for the queue
+    %% process: rabbit_core_metrics:get_gen_server2_stats/1 must return a
+    %% number for the queue pid.
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"gen_server2_stats_queue">>,
+ declare(Ch, Q, 3),
+ Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ Metrics = rabbit_ct_broker_helpers:rpc(
+ Config, A, rabbit_core_metrics, get_gen_server2_stats,
+ [Pid]),
+ true = is_number(Metrics),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+dropwhile_fetchwhile(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"dropwhile_fetchwhile-queue">>,
+ [begin
+ declare(Ch, Q, Args ++ arguments(3)),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+ timer:sleep(10),
+ get_empty(Ch, Q),
+ delete(Ch, Q)
+ end ||
+ Args <- [[{<<"x-message-ttl">>, long, 1}],
+ [{<<"x-message-ttl">>, long, 1},
+ {<<"x-dead-letter-exchange">>, longstr, <<"amq.fanout">>}]
+ ]],
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+ackfold(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Q = <<"ackfold-queue1">>,
+ Q2 = <<"ackfold-queue2">>,
+ declare(Ch, Q,
+ [{<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, Q2}
+ | arguments(3)]),
+ declare(Ch, Q2, none),
+ publish(Ch, Q, [1, 2, 3]),
+ [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+ multiple = true,
+ requeue = false}),
+ timer:sleep(100),
+ get_all(Ch, Q2, do_ack, [3, 2, 1]),
+ delete(Ch, Q),
+ delete(Ch, Q2),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+requeue(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"requeue-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3]),
+ [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+ multiple = true,
+ requeue = true}),
+ get_all(Ch, Q, do_ack, [3, 2, 1]),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+drop(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"drop-queue">>,
+ declare(Ch, Q, [{<<"x-max-length">>, long, 4} | arguments(3)]),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+ %% We drop from the head, so this is according to the "spec" even
+ %% if not likely to be what the user wants.
+ get_all(Ch, Q, do_ack, [2, 1, 1, 1]),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+reject(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ XOverflow = ?config(overflow, Config),
+ Q = <<"reject-queue-", XOverflow/binary>>,
+ declare(Ch, Q, [{<<"x-max-length">>, long, 4},
+ {<<"x-overflow">>, longstr, XOverflow}
+ | arguments(3)]),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+ %% First 4 messages are published, all others are discarded.
+ get_all(Ch, Q, do_ack, [3, 2, 1, 1]),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+purge(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"purge-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3]),
+ amqp_channel:call(Ch, #'queue.purge'{queue = Q}),
+ get_empty(Ch, Q),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+info_head_message_timestamp(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, info_head_message_timestamp1, [Config]).
+
+info_head_message_timestamp1(_Config) ->
+ QName = rabbit_misc:r(<<"/">>, queue,
+ <<"info_head_message_timestamp-queue">>),
+ Q0 = rabbit_amqqueue:pseudo_queue(QName, self()),
+ Q1 = amqqueue:set_arguments(Q0, [{<<"x-max-priority">>, long, 2}]),
+ PQ = rabbit_priority_queue,
+ BQS1 = PQ:init(Q1, new, fun(_, _) -> ok end),
+ %% The queue is empty: no timestamp.
+ true = PQ:is_empty(BQS1),
+ '' = PQ:info(head_message_timestamp, BQS1),
+ %% Publish one message with timestamp 1000.
+ Msg1 = #basic_message{
+ id = msg1,
+ content = #content{
+ properties = #'P_basic'{
+ priority = 1,
+ timestamp = 1000
+ }},
+ is_persistent = false
+ },
+ BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(),
+ noflow, BQS1),
+ 1000 = PQ:info(head_message_timestamp, BQS2),
+ %% Publish a higher priority message with no timestamp.
+ Msg2 = #basic_message{
+ id = msg2,
+ content = #content{
+ properties = #'P_basic'{
+ priority = 2
+ }},
+ is_persistent = false
+ },
+ BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(),
+ noflow, BQS2),
+ '' = PQ:info(head_message_timestamp, BQS3),
+ %% Consume message with no timestamp.
+ {{Msg2, _, _}, BQS4} = PQ:fetch(false, BQS3),
+ 1000 = PQ:info(head_message_timestamp, BQS4),
+ %% Consume message with timestamp 1000, but do not acknowledge it
+ %% yet. The goal is to verify that the unacknowledged message's
+ %% timestamp is returned.
+ {{Msg1, _, AckTag}, BQS5} = PQ:fetch(true, BQS4),
+ 1000 = PQ:info(head_message_timestamp, BQS5),
+ %% Ack message. The queue is empty now.
+ {[msg1], BQS6} = PQ:ack([AckTag], BQS5),
+ true = PQ:is_empty(BQS6),
+ '' = PQ:info(head_message_timestamp, BQS6),
+ PQ:delete_and_terminate(a_whim, BQS6),
+ passed.
+
+ram_duration(_Config) ->
+ QName = rabbit_misc:r(<<"/">>, queue, <<"ram_duration-queue">>),
+ Q0 = rabbit_amqqueue:pseudo_queue(QName, self()),
+ Q1 = amqqueue:set_arguments(Q0, [{<<"x-max-priority">>, long, 5}]),
+ PQ = rabbit_priority_queue,
+ BQS1 = PQ:init(Q1, new, fun(_, _) -> ok end),
+ {_Duration1, BQS2} = PQ:ram_duration(BQS1),
+ BQS3 = PQ:set_ram_duration_target(infinity, BQS2),
+ BQS4 = PQ:set_ram_duration_target(1, BQS3),
+ {_Duration2, BQS5} = PQ:ram_duration(BQS4),
+ PQ:delete_and_terminate(a_whim, BQS5),
+ passed.
+
+mirror_queue_sync(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"mirror_queue_sync-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3]),
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, 0,
+ <<"^mirror_queue_sync-queue$">>, <<"all">>),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3]),
+ %% master now has 9, mirror 6.
+ get_partial(Ch, Q, manual_ack, [3, 3, 3, 2, 2, 2]),
+ %% So some but not all are unacked at the mirror
+ Nodename0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_broker_helpers:control_action(sync_queue, Nodename0,
+ [binary_to_list(Q)], [{"-p", "/"}]),
+ wait_for_sync(Config, Nodename0, rabbit_misc:r(<<"/">>, queue, Q)),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+mirror_queue_sync_priority_above_max(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    %% Tests synchronisation of mirrors when a message priority is higher than
+    %% the queue's max priority. This caused an infinite loop (and test timeout)
+    %% before rabbitmq-server-795.
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"mirror_queue_sync_priority_above_max-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [5, 5, 5]),
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+ <<".*">>, <<"all">>),
+ rabbit_ct_broker_helpers:control_action(sync_queue, A,
+ [binary_to_list(Q)], [{"-p", "/"}]),
+ wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+mirror_queue_sync_priority_above_max_pending_ack(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    %% Tests synchronisation of mirrors when a message priority is higher than
+    %% the queue's max priority and there are pending acks. This caused an
+    %% infinite loop (and test timeout) before rabbitmq-server-795.
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"mirror_queue_sync_priority_above_max_pending_ack-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [5, 5, 5]),
+ %% Consume but 'forget' to acknowledge
+ get_without_ack(Ch, Q),
+ get_without_ack(Ch, Q),
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+ <<".*">>, <<"all">>),
+ rabbit_ct_broker_helpers:control_action(sync_queue, A,
+ [binary_to_list(Q)], [{"-p", "/"}]),
+ wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ synced_msgs(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 3),
+ synced_msgs(Config, B, rabbit_misc:r(<<"/">>, queue, Q), 3),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+mirror_queue_auto_ack(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ %% Check correct use of AckRequired in the notifications to the mirrors.
+ %% If mirrors are notified with AckRequired == true when it is false,
+ %% the mirrors will crash with the depth notification as they will not
+ %% match the master delta.
+ %% Bug rabbitmq-server 687
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"mirror_queue_auto_ack-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3]),
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+ <<".*">>, <<"all">>),
+ get_partial(Ch, Q, no_ack, [3, 2, 1]),
+
+ %% Retrieve mirrors
+ SPids = slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ [{SNode1, _SPid1}, {SNode2, SPid2}] = nodes_and_pids(SPids),
+
+ %% Restart one of the mirrors so `request_depth` is triggered
+ rabbit_ct_broker_helpers:restart_node(Config, SNode1),
+
+ %% The alive mirror must have the same pid after its neighbour is restarted
+ timer:sleep(3000), %% ugly but we can't know when the `depth` instruction arrives
+ Slaves = nodes_and_pids(slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q))),
+ SPid2 = proplists:get_value(SNode2, Slaves),
+
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+mirror_queue_sync_order(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ {Conn2, Ch2} = rabbit_ct_client_helpers:open_connection_and_channel(Config, B),
+ Q = <<"mirror_queue_sync_order-queue">>,
+ declare(Ch, Q, 3),
+ publish_payload(Ch, Q, [{1, <<"msg1">>}, {2, <<"msg2">>},
+ {2, <<"msg3">>}, {2, <<"msg4">>},
+ {3, <<"msg5">>}]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ %% Add and sync mirror
+ ok = rabbit_ct_broker_helpers:set_ha_policy(
+ Config, A, <<"^mirror_queue_sync_order-queue$">>, <<"all">>),
+ rabbit_ct_broker_helpers:control_action(sync_queue, A,
+ [binary_to_list(Q)], [{"-p", "/"}]),
+ wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+
+ %% Stop the master
+ rabbit_ct_broker_helpers:stop_node(Config, A),
+
+ get_payload(Ch2, Q, do_ack, [<<"msg5">>, <<"msg2">>, <<"msg3">>,
+ <<"msg4">>, <<"msg1">>]),
+
+ delete(Ch2, Q),
+ rabbit_ct_broker_helpers:start_node(Config, A),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ rabbit_ct_client_helpers:close_connection(Conn2),
+ passed.
+
+mirror_reset_policy(Config) ->
+    %% Gives the master time to go through all stages.
+ %% Might eventually trigger some race conditions from #802,
+ %% although for that I would expect a longer run and higher
+ %% number of messages in the system.
+ mirror_reset_policy(Config, 5000).
+
+mirror_fast_reset_policy(Config) ->
+ %% This test seems to trigger the bug tested in invoke/1, but it
+ %% cannot guarantee it will always happen. Thus, both tests
+ %% should stay in the test suite.
+ mirror_reset_policy(Config, 5).
+
+
+mirror_reset_policy(Config, Wait) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"mirror_reset_policy-queue">>,
+ declare(Ch, Q, 5),
+ Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ publish_many(Ch, Q, 20000),
+ [begin
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, A, <<"^mirror_reset_policy-queue$">>, <<"all">>,
+ [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ timer:sleep(Wait),
+ rabbit_ct_broker_helpers:clear_policy(
+ Config, A, <<"^mirror_reset_policy-queue$">>),
+ timer:sleep(Wait)
+ end || _ <- lists:seq(1, 10)],
+ timer:sleep(1000),
+ ok = rabbit_ct_broker_helpers:set_ha_policy(
+ Config, A, <<"^mirror_reset_policy-queue$">>, <<"all">>,
+ [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 2),
+ %% Verify master has not crashed
+ Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ delete(Ch, Q),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+mirror_stop_pending_followers(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ C = rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename),
+
+ [ok = rabbit_ct_broker_helpers:rpc(
+ Config, Nodename, application, set_env, [rabbit, slave_wait_timeout, 0]) || Nodename <- [A, B, C]],
+
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"mirror_stop_pending_followers-queue">>,
+ declare(Ch, Q, 5),
+ publish_many(Ch, Q, 20000),
+
+ [begin
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, A, <<"^mirror_stop_pending_followers-queue$">>, <<"all">>,
+ [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 2),
+ rabbit_ct_broker_helpers:clear_policy(
+ Config, A, <<"^mirror_stop_pending_followers-queue$">>)
+ end || _ <- lists:seq(1, 15)],
+
+ delete(Ch, Q),
+
+ [ok = rabbit_ct_broker_helpers:rpc(
+ Config, Nodename, application, set_env, [rabbit, slave_wait_timeout, 15000]) || Nodename <- [A, B, C]],
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+%%----------------------------------------------------------------------------
+
+declare(Ch, Q, Args) when is_list(Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ arguments = Args});
+declare(Ch, Q, Max) ->
+ declare(Ch, Q, arguments(Max)).
+
+delete(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+publish(Ch, Q, Ps) ->
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ [publish1(Ch, Q, P) || P <- Ps],
+ amqp_channel:wait_for_confirms(Ch).
+
+publish_payload(Ch, Q, PPds) ->
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ [publish1(Ch, Q, P, Pd) || {P, Pd} <- PPds],
+ amqp_channel:wait_for_confirms(Ch).
+
+publish_many(_Ch, _Q, 0) -> ok;
+publish_many( Ch, Q, N) -> publish1(Ch, Q, rand:uniform(5)),
+ publish_many(Ch, Q, N - 1).
+
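+%% publish1/3 encodes the priority in the payload, which lets get_ok/4 below
+%% assert the exact dequeue order.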
+publish1(Ch, Q, P) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = priority2bin(P)}).
+
+publish1(Ch, Q, P, Pd) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = Pd}).
+
+props(undefined) -> #'P_basic'{delivery_mode = 2};
+props(P) -> #'P_basic'{priority = P,
+ delivery_mode = 2}.
+
+consume(Ch, Q, Ack) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = Ack =:= no_ack,
+ consumer_tag = <<"ctag">>},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+cancel(Ch) ->
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}).
+
+assert_delivered(Ch, Ack, P) ->
+ PBin = priority2bin(P),
+ receive
+ {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} ->
+ PBin = PBin2,
+ maybe_ack(Ch, Ack, DTag)
+ end.
+
+get_all(Ch, Q, Ack, Ps) ->
+ DTags = get_partial(Ch, Q, Ack, Ps),
+ get_empty(Ch, Q),
+ DTags.
+
+get_partial(Ch, Q, Ack, Ps) ->
+ [get_ok(Ch, Q, Ack, priority2bin(P)) || P <- Ps].
+
+get_empty(Ch, Q) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+get_ok(Ch, Q, Ack, PBin) ->
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = Q,
+ no_ack = Ack =:= no_ack}),
+ ?assertEqual(PBin, PBin2),
+ maybe_ack(Ch, Ack, DTag).
+
+get_payload(Ch, Q, Ack, Ps) ->
+ [get_ok(Ch, Q, Ack, P) || P <- Ps].
+
+get_without_ack(Ch, Q) ->
+ {#'basic.get_ok'{}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = Q, no_ack = false}).
+
+maybe_ack(Ch, do_ack, DTag) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+ DTag;
+maybe_ack(_Ch, _, DTag) ->
+ DTag.
+
+arguments(none) -> [];
+arguments(Max) -> [{<<"x-max-priority">>, byte, Max}].
+
+priority2bin(undefined) -> <<"undefined">>;
+priority2bin(Int) -> list_to_binary(integer_to_list(Int)).
+
+%%----------------------------------------------------------------------------
+
+wait_for_sync(Config, Nodename, Q) ->
+ wait_for_sync(Config, Nodename, Q, 1).
+
+wait_for_sync(Config, Nodename, Q, Nodes) ->
+ wait_for_sync(Config, Nodename, Q, Nodes, 600).
+
+wait_for_sync(_, _, _, _, 0) ->
+ throw(sync_timeout);
+wait_for_sync(Config, Nodename, Q, Nodes, N) ->
+ case synced(Config, Nodename, Q, Nodes) of
+ true -> ok;
+ false -> timer:sleep(100),
+ wait_for_sync(Config, Nodename, Q, Nodes, N-1)
+ end.
+
+synced(Config, Nodename, Q, Nodes) ->
+ Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+ rabbit_amqqueue, info_all, [<<"/">>, [name, synchronised_slave_pids]]),
+ [SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info,
+ Q =:= Q1],
+ length(SSPids) =:= Nodes.
+
+synced_msgs(Config, Nodename, Q, Expected) ->
+ Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+ rabbit_amqqueue, info_all, [<<"/">>, [name, messages]]),
+ [M] = [M || [{name, Q1}, {messages, M}] <- Info, Q =:= Q1],
+ M =:= Expected.
+
+nodes_and_pids(SPids) ->
+ lists:zip([node(S) || S <- SPids], SPids).
+
+slave_pids(Config, Nodename, Q) ->
+ Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+ rabbit_amqqueue, info_all, [<<"/">>, [name, slave_pids]]),
+ [SPids] = [SPids || [{name, Q1}, {slave_pids, SPids}] <- Info,
+ Q =:= Q1],
+ SPids.
+
+queue_pid(Config, Nodename, Q) ->
+ Info = rabbit_ct_broker_helpers:rpc(
+ Config, Nodename,
+ rabbit_amqqueue, info_all, [<<"/">>, [name, pid]]),
+ [Pid] = [P || [{name, Q1}, {pid, P}] <- Info, Q =:= Q1],
+ Pid.
+
+%%----------------------------------------------------------------------------
diff --git a/deps/rabbit/test/priority_queue_recovery_SUITE.erl b/deps/rabbit/test/priority_queue_recovery_SUITE.erl
new file mode 100644
index 0000000000..9679fb0449
--- /dev/null
+++ b/deps/rabbit/test/priority_queue_recovery_SUITE.erl
@@ -0,0 +1,144 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(priority_queue_recovery_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ recovery %% Restart RabbitMQ.
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+recovery(Config) ->
+ {Conn, Ch} = open(Config),
+ Q = <<"recovery-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+
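+    %% The messages were published as persistent (delivery_mode = 2) to a
+    %% durable queue, so they are expected to survive the restart in priority
+    %% order.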
+ rabbit_ct_broker_helpers:restart_broker(Config, 0),
+
+ {Conn2, Ch2} = open(Config, 1),
+ get_all(Ch2, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+ delete(Ch2, Q),
+ rabbit_ct_client_helpers:close_channel(Ch2),
+ rabbit_ct_client_helpers:close_connection(Conn2),
+ passed.
+
+
+%%----------------------------------------------------------------------------
+
+open(Config) ->
+ open(Config, 0).
+
+open(Config, NodeIndex) ->
+ rabbit_ct_client_helpers:open_connection_and_channel(Config, NodeIndex).
+
+declare(Ch, Q, Args) when is_list(Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ arguments = Args});
+declare(Ch, Q, Max) ->
+ declare(Ch, Q, arguments(Max)).
+
+delete(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+publish(Ch, Q, Ps) ->
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ [publish1(Ch, Q, P) || P <- Ps],
+ amqp_channel:wait_for_confirms(Ch).
+
+publish1(Ch, Q, P) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = priority2bin(P)}).
+
+publish1(Ch, Q, P, Pd) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = Pd}).
+
+get_all(Ch, Q, Ack, Ps) ->
+ DTags = get_partial(Ch, Q, Ack, Ps),
+ get_empty(Ch, Q),
+ DTags.
+
+get_partial(Ch, Q, Ack, Ps) ->
+ [get_ok(Ch, Q, Ack, priority2bin(P)) || P <- Ps].
+
+get_empty(Ch, Q) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+get_ok(Ch, Q, Ack, PBin) ->
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = Q,
+ no_ack = Ack =:= no_ack}),
+ PBin = PBin2,
+ maybe_ack(Ch, Ack, DTag).
+
+maybe_ack(Ch, do_ack, DTag) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+ DTag;
+maybe_ack(_Ch, _, DTag) ->
+ DTag.
+
+arguments(none) -> [];
+arguments(Max) -> [{<<"x-max-priority">>, byte, Max}].
+
+priority2bin(undefined) -> <<"undefined">>;
+priority2bin(Int) -> list_to_binary(integer_to_list(Int)).
+
+props(undefined) -> #'P_basic'{delivery_mode = 2};
+props(P) -> #'P_basic'{priority = P,
+ delivery_mode = 2}.
diff --git a/deps/rabbit/test/product_info_SUITE.erl b/deps/rabbit/test/product_info_SUITE.erl
new file mode 100644
index 0000000000..207f9222d1
--- /dev/null
+++ b/deps/rabbit/test/product_info_SUITE.erl
@@ -0,0 +1,171 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(product_info_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([suite/0,
+ all/0,
+ groups/0,
+ init_per_suite/1,
+ end_per_suite/1,
+ init_per_group/2,
+ end_per_group/2,
+ init_per_testcase/2,
+ end_per_testcase/2,
+
+ override_product_name_in_conf/1,
+ override_product_version_in_conf/1,
+ set_motd_in_conf/1
+ ]).
+
+suite() ->
+ [{timetrap, {minutes, 5}}].
+
+all() ->
+ [
+ {group, parallel}
+ ].
+
+groups() ->
+ [
+ {parallel, [],
+ [override_product_name_in_conf,
+ override_product_version_in_conf,
+ set_motd_in_conf]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ case os:type() of
+ {unix, _} ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config);
+ _ ->
+ {skip, "This testsuite is only relevant on Unix"}
+ end.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 1,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ Config2 = case Testcase of
+ override_product_name_in_conf ->
+ rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit, [{product_name, "MyProduct"}]});
+ override_product_version_in_conf ->
+ rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit, [{product_version, "MyVersion"}]});
+ set_motd_in_conf ->
+ PrivDir = ?config(priv_dir, Config),
+ MotdFile = filename:join(PrivDir, "motd.txt"),
+ ok = file:write_file(MotdFile, <<"My MOTD\n">>),
+ C2 = rabbit_ct_helpers:set_config(
+ Config1,
+ {motd_file, MotdFile}),
+ rabbit_ct_helpers:merge_app_env(
+ C2,
+ {rabbit, [{motd_file, MotdFile}]})
+ end,
+ rabbit_ct_helpers:run_steps(Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+override_product_name_in_conf(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ProductName = "MyProduct",
+ ?assertEqual(ProductName,
+ rabbit_ct_broker_helpers:rpc(
+ Config, A, rabbit, product_name, [])),
+ grep_in_log_file(Config, A, ProductName),
+ grep_in_stdout(Config, A, ProductName).
+
+override_product_version_in_conf(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ProductVersion = "MyVersion",
+ ?assertEqual(ProductVersion,
+ rabbit_ct_broker_helpers:rpc(
+ Config, A, rabbit, product_version, [])),
+ grep_in_log_file(Config, A, ProductVersion),
+ grep_in_stdout(Config, A, ProductVersion).
+
+set_motd_in_conf(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ MotdFile = ?config(motd_file, Config),
+ ?assertEqual(MotdFile,
+ rabbit_ct_broker_helpers:rpc(
+ Config, A, rabbit, motd_file, [])),
+ {ok, Motd0} = file:read_file(MotdFile),
+ Motd = string:trim(Motd0, trailing, [$\r,$\n]),
+ ?assertEqual(Motd,
+ rabbit_ct_broker_helpers:rpc(
+ Config, A, rabbit, motd, [])),
+ grep_in_log_file(Config, A, Motd),
+ grep_in_stdout(Config, A, Motd).
+
+grep_in_log_file(Config, Node, String) ->
+ [Log | _] = rabbit_ct_broker_helpers:rpc(
+ Config, Node, rabbit, log_locations, []),
+ ct:pal(?LOW_IMPORTANCE, "Grepping \"~s\" in ~s", [String, Log]),
+ %% We try to grep several times, in case the log file was not
+ %% fsync'd yet (and thus we don't see the content yet).
+ do_grep_in_log_file(String, Log, 30).
+
+do_grep_in_log_file(String, Log, Retries) ->
+ {ok, Content} = file:read_file(Log),
+ case re:run(Content, ["\\b", String, "\\b"], [{capture, none}]) of
+ match ->
+ ok;
+ nomatch when Retries > 1 ->
+ timer:sleep(1000),
+ do_grep_in_log_file(String, Log, Retries - 1);
+ nomatch ->
+ throw({failed_to_grep, String, Log, Content})
+ end.
+
+grep_in_stdout(Config, Node, String) ->
+ [Log | _] = rabbit_ct_broker_helpers:rpc(
+ Config, Node, rabbit, log_locations, []),
+ LogDir = filename:dirname(Log),
+ Stdout = filename:join(LogDir, "startup_log"),
+ ct:pal(?LOW_IMPORTANCE, "Grepping \"~s\" in ~s", [String, Stdout]),
+ {ok, Content} = file:read_file(Stdout),
+ ?assertMatch(
+ match,
+ re:run(Content, ["\\b", String, "\\b"], [{capture, none}])).
diff --git a/deps/rabbit/test/proxy_protocol_SUITE.erl b/deps/rabbit/test/proxy_protocol_SUITE.erl
new file mode 100644
index 0000000000..92c29b6063
--- /dev/null
+++ b/deps/rabbit/test/proxy_protocol_SUITE.erl
@@ -0,0 +1,91 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(proxy_protocol_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 5000).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() -> [
+ {sequential_tests, [], [
+ proxy_protocol,
+ proxy_protocol_tls
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config2 = rabbit_ct_helpers:merge_app_env(Config1, [
+ {rabbit, [
+ {proxy_protocol, true}
+ ]}
+ ]),
+ Config3 = rabbit_ct_helpers:set_config(Config2, {rabbitmq_ct_tls_verify, verify_none}),
+ rabbit_ct_helpers:run_setup_steps(Config3,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+proxy_protocol(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port,
+ [binary, {active, false}, {packet, raw}]),
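+    %% Send a PROXY protocol v1 header ("PROXY TCP4 <src> <dst> <src-port>
+    %% <dst-port>\r\n") ahead of the AMQP handshake; the broker should then
+    %% report the advertised source address as the client address.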
+ ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ ok = inet:send(Socket, <<"AMQP", 0, 0, 9, 1>>),
+ {ok, _Packet} = gen_tcp:recv(Socket, 0, ?TIMEOUT),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ gen_tcp:close(Socket),
+ ok.
+
+proxy_protocol_tls(Config) ->
+ app_utils:start_applications([asn1, crypto, public_key, ssl]),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls),
+ {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ {ok, SslSocket} = ssl:connect(Socket, [], ?TIMEOUT),
+ ok = ssl:send(SslSocket, <<"AMQP", 0, 0, 9, 1>>),
+ {ok, _Packet} = ssl:recv(SslSocket, 0, ?TIMEOUT),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ gen_tcp:close(Socket),
+ ok.
+
+connection_name() ->
+ Pids = pg_local:get_members(rabbit_connections),
+ Pid = lists:nth(1, Pids),
+ {dictionary, Dict} = process_info(Pid, dictionary),
+ {process_name, {rabbit_reader, ConnectionName}} = lists:keyfind(process_name, 1, Dict),
+ ConnectionName.
diff --git a/deps/rabbit/test/publisher_confirms_parallel_SUITE.erl b/deps/rabbit/test/publisher_confirms_parallel_SUITE.erl
new file mode 100644
index 0000000000..f79fcae3ce
--- /dev/null
+++ b/deps/rabbit/test/publisher_confirms_parallel_SUITE.erl
@@ -0,0 +1,380 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(publisher_confirms_parallel_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 60000).
+
+-import(quorum_queue_utils, [wait_for_messages/2]).
+
+all() ->
+ [
+ {group, publisher_confirm_tests}
+ ].
+
+groups() ->
+ PublisherConfirmTests = [publisher_confirms,
+ publisher_confirms_with_deleted_queue,
+ confirm_select_ok,
+ confirm_nowait,
+ confirm_ack,
+ confirm_acks,
+ confirm_mandatory_unroutable,
+ confirm_unroutable_message],
+ [
+ {publisher_confirm_tests, [],
+ [
+ {classic_queue, [parallel], PublisherConfirmTests ++ [confirm_nack]},
+ {mirrored_queue, [parallel], PublisherConfirmTests ++ [confirm_nack]},
+ {quorum_queue, [],
+ [
+             {parallel_tests, [parallel], PublisherConfirmTests},
+ confirm_minority
+ ]}
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 3}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(classic_queue, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, true}]);
+init_per_group(quorum_queue, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(mirrored_queue, Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
+ <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [{is_mirrored, true},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, true}]),
+ rabbit_ct_helpers:run_steps(Config1, []);
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 3,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ Config
+ end.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Q2 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_2", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, Q},
+ {queue_name_2, Q2}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name_2, Config)}),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% To enable confirms, a client sends the confirm.select method
+publisher_confirms(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ amqp_channel:wait_for_confirms(Ch, 5),
+ amqp_channel:unregister_confirm_handler(Ch),
+ ok.
+
+publisher_confirms_with_deleted_queue(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>]),
+ amqp_channel:call(Ch, #'queue.delete'{queue = QName}),
+ amqp_channel:wait_for_confirms_or_die(Ch, 5),
+ amqp_channel:unregister_confirm_handler(Ch).
+
+%% Depending on whether no-wait was set or not, the broker may respond with a confirm.select-ok
+confirm_select_ok(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ ?assertEqual(#'confirm.select_ok'{}, amqp_channel:call(Ch, #'confirm.select'{nowait = false})).
+
+confirm_nowait(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ ?assertEqual(ok, amqp_channel:call(Ch, #'confirm.select'{nowait = true})).
+
+%% The broker then confirms messages as it handles them by sending a basic.ack on the same channel.
+%% The delivery-tag field contains the sequence number of the confirmed message.
+confirm_ack(Config) ->
+ %% Ensure we receive an ack and not a nack
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>]),
+ receive
+ #'basic.ack'{delivery_tag = 1} ->
+ ok
+ after 5000 ->
+ throw(missing_ack)
+ end.
+
+%% The broker may also set the multiple field in basic.ack to indicate that all messages up to
+%% and including the one with the sequence number have been handled.
+confirm_acks(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>, <<"msg4">>]),
+ receive_many(lists:seq(1, 4)).
+
+%% For unroutable messages, the broker will issue a confirm once the exchange verifies a message
+%% won't route to any queue (returns an empty list of queues).
+%% If the message is also published as mandatory, the basic.return is sent to the client before
+%% basic.ack.
+confirm_mandatory_unroutable(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ amqp_channel:register_return_handler(Ch, self()),
+ ok = amqp_channel:call(Ch, #'basic.publish'{routing_key = QName,
+ mandatory = true}, #amqp_msg{payload = <<"msg1">>}),
+ receive
+ {#'basic.return'{}, _} ->
+ ok
+ after 5000 ->
+ throw(missing_return)
+ end,
+ receive
+ #'basic.ack'{delivery_tag = 1} ->
+ ok
+ after 5000 ->
+ throw(missing_ack)
+ end.
+
+confirm_unroutable_message(Config) ->
+ %% Ensure we receive a nack for an unroutable message
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>]),
+ receive
+ {#'basic.return'{}, _} ->
+ throw(unexpected_basic_return);
+ #'basic.ack'{delivery_tag = 1} ->
+ ok
+ after 5000 ->
+ throw(missing_ack)
+ end.
+
+%% In exceptional cases when the broker is unable to handle messages successfully,
+%% instead of a basic.ack, the broker will send a basic.nack.
+%% basic.nack will only be delivered if an internal error occurs in the Erlang process
+%% responsible for a queue.
+%% This test crashes the queue before it has time to answer, but it only works for classic
+%% queues. On quorum queues the followers will take over and rabbit_fifo_client will resend
+%% any pending messages.
+confirm_nack(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, confirm_nack1, [Config]).
+
+confirm_nack1(Config) ->
+ {_Writer, _Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(),
+ ok = rabbit_channel:do(Ch, #'channel.open'{}),
+ receive #'channel.open_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
+ end,
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName1 = ?config(queue_name, Config),
+ QName2 = ?config(queue_name_2, Config),
+ DeclareBindDurableQueue =
+ fun(QName) ->
+ rabbit_channel:do(Ch, #'queue.declare'{durable = Durable,
+ queue = QName,
+ arguments = Args}),
+ receive #'queue.declare_ok'{} ->
+ rabbit_channel:do(Ch, #'queue.bind'{
+ queue = QName,
+ exchange = <<"amq.direct">>,
+ routing_key = "confirms-magic" }),
+ receive #'queue.bind_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_bind_queue)
+ end
+ after ?TIMEOUT -> throw(failed_to_declare_queue)
+ end
+ end,
+ %% Declare and bind two queues
+ DeclareBindDurableQueue(QName1),
+ DeclareBindDurableQueue(QName2),
+ %% Get the first one's pid (we'll crash it later)
+ {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)),
+ QPid1 = amqqueue:get_pid(Q1),
+ %% Enable confirms
+ rabbit_channel:do(Ch, #'confirm.select'{}),
+ receive
+ #'confirm.select_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_enable_confirms)
+ end,
+ %% Publish a message
+ rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>,
+ routing_key = "confirms-magic"
+ },
+ rabbit_basic:build_content(
+ #'P_basic'{delivery_mode = 2}, <<"">>)),
+ %% We must not kill the queue before the channel has processed the
+ %% 'publish'.
+ ok = rabbit_channel:flush(Ch),
+ %% Crash the queue
+ QPid1 ! boom,
+ %% Wait for a nack
+ receive
+ #'basic.nack'{} -> ok;
+ #'basic.ack'{} -> throw(received_ack_instead_of_nack)
+    after ?TIMEOUT -> throw(did_not_receive_nack)
+ end,
+ receive
+ #'basic.ack'{} -> throw(received_ack_when_none_expected)
+ after 1000 -> ok
+ end,
+ %% Cleanup
+ unlink(Ch),
+ ok = rabbit_channel:shutdown(Ch),
+ passed.
+
+%% The closest to a nack behaviour that we can get on quorum queues is not answering while
+%% the cluster is in minority. Once the cluster recovers, a 'basic.ack' will be issued.
+confirm_minority(Config) ->
+ [_A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, B),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, C),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>]),
+ receive
+ #'basic.nack'{} -> ok;
+ #'basic.ack'{} -> throw(unexpected_ack)
+ after 120000 ->
+ ok
+ end,
+ ok = rabbit_ct_broker_helpers:start_node(Config, B),
+ publish(Ch, QName, [<<"msg2">>]),
+ receive
+ #'basic.nack'{} -> throw(unexpected_nack);
+ #'basic.ack'{} -> ok
+ after 60000 ->
+ throw(missing_ack)
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+%% Test helpers
+%%%%%%%%%%%%%%%%%%%%%%%%
+declare_queue(Ch, Config, QName) ->
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = Durable}).
+
+publish(Ch, QName, Payloads) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload})
+ || Payload <- Payloads].
+
+publish(Ch, QName, Payloads, Headers) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName},
+ #amqp_msg{payload = Payload,
+ props = #'P_basic'{headers = Headers}})
+ || Payload <- Payloads].
+
+consume(Ch, QName, Payloads) ->
+ [begin
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = Payload}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ DTag
+ end || Payload <- Payloads].
+
+consume_empty(Ch, QName) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+sync_mirrors(QName, Config) ->
+ case ?config(is_mirrored, Config) of
+ true ->
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]);
+ _ -> ok
+ end.
+
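+%% Wait until acks covering every delivery tag in DTags have arrived,
+%% honouring the 'multiple' flag: an ack with multiple = true confirms all
+%% tags up to and including its delivery_tag.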
+receive_many([]) ->
+ ok;
+receive_many(DTags) ->
+ receive
+ #'basic.ack'{delivery_tag = DTag, multiple = true} ->
+ receive_many(DTags -- lists:seq(1, DTag));
+ #'basic.ack'{delivery_tag = DTag, multiple = false} ->
+ receive_many(DTags -- [DTag])
+ after 5000 ->
+ throw(missing_ack)
+ end.
diff --git a/deps/rabbit/test/queue_length_limits_SUITE.erl b/deps/rabbit/test/queue_length_limits_SUITE.erl
new file mode 100644
index 0000000000..b86f502869
--- /dev/null
+++ b/deps/rabbit/test/queue_length_limits_SUITE.erl
@@ -0,0 +1,382 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(queue_length_limits_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT_LIST_OPS_PASS, 5000).
+-define(TIMEOUT, 30000).
+-define(TIMEOUT_CHANNEL_EXCEPTION, 5000).
+
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ MaxLengthTests = [max_length_default,
+ max_length_bytes_default,
+ max_length_drop_head,
+ max_length_bytes_drop_head,
+ max_length_reject_confirm,
+ max_length_bytes_reject_confirm,
+ max_length_drop_publish,
+ max_length_drop_publish_requeue,
+ max_length_bytes_drop_publish],
+ [
+ {parallel_tests, [parallel], [
+ {max_length_classic, [], MaxLengthTests},
+ {max_length_quorum, [], [max_length_default,
+ max_length_bytes_default]
+ },
+ {max_length_mirrored, [], MaxLengthTests}
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 3}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(max_length_classic, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, false}]);
+init_per_group(max_length_quorum, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(max_length_mirrored, Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
+ <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [{is_mirrored, true},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, false}]),
+ rabbit_ct_helpers:run_steps(Config1, []);
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 3,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ rabbit_ct_helpers:run_steps(Config, [])
+ end.
+
+end_per_group(max_length_mirrored, Config) ->
+ rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"^max_length.*queue">>),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{is_mirrored, false}]),
+ Config1;
+end_per_group(queue_max_length, Config) ->
+ Config;
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, Q}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config)
+ when Testcase == max_length_drop_publish; Testcase == max_length_bytes_drop_publish;
+ Testcase == max_length_drop_publish_requeue;
+ Testcase == max_length_reject_confirm; Testcase == max_length_bytes_reject_confirm;
+ Testcase == max_length_drop_head; Testcase == max_length_bytes_drop_head;
+ Testcase == max_length_default; Testcase == max_length_bytes_default ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, 0),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+max_length_bytes_drop_head(Config) ->
+ max_length_bytes_drop_head(Config, [{<<"x-overflow">>, longstr, <<"drop-head">>}]).
+
+max_length_bytes_default(Config) ->
+ max_length_bytes_drop_head(Config, []).
+
+max_length_bytes_drop_head(Config, ExtraArgs) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ MaxLengthBytesArgs = [{<<"x-max-length-bytes">>, long, 100}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthBytesArgs ++ Args ++ ExtraArgs, durable = Durable}),
+
+ %% 80 bytes payload
+ Payload1 = << <<"1">> || _ <- lists:seq(1, 80) >>,
+ Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>,
+ Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>,
+ check_max_length_drops_head(Config, QName, Ch, Payload1, Payload2, Payload3).
+
+max_length_drop_head(Config) ->
+ max_length_drop_head(Config, [{<<"x-overflow">>, longstr, <<"drop-head">>}]).
+
+max_length_default(Config) ->
+ %% Defaults to drop_head
+ max_length_drop_head(Config, []).
+
+max_length_drop_head(Config, ExtraArgs) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ MaxLengthArgs = [{<<"x-max-length">>, long, 1}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ Args ++ ExtraArgs, durable = Durable}),
+
+ check_max_length_drops_head(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>).
+
+max_length_reject_confirm(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ QName = ?config(queue_name, Config),
+ Durable = ?config(queue_durable, Config),
+ MaxLengthArgs = [{<<"x-max-length">>, long, 1}],
+ OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ check_max_length_drops_publish(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>),
+ check_max_length_rejects(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>).
+
+max_length_bytes_reject_confirm(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ QNameBytes = ?config(queue_name, Config),
+ Durable = ?config(queue_durable, Config),
+ MaxLengthBytesArgs = [{<<"x-max-length-bytes">>, long, 100}],
+ OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QNameBytes, arguments = MaxLengthBytesArgs ++ OverflowArgs ++ Args, durable = Durable}),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+
+ %% 80 bytes payload
+ Payload1 = << <<"1">> || _ <- lists:seq(1, 80) >>,
+ Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>,
+ Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>,
+
+ check_max_length_drops_publish(Config, QNameBytes, Ch, Payload1, Payload2, Payload3),
+ check_max_length_rejects(Config, QNameBytes, Ch, Payload1, Payload2, Payload3).
+
+max_length_drop_publish(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ MaxLengthArgs = [{<<"x-max-length">>, long, 1}],
+ OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}),
+    %% If confirms are not enabled, publishes will still be dropped in reject-publish mode.
+ check_max_length_drops_publish(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>).
+
+max_length_drop_publish_requeue(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ MaxLengthArgs = [{<<"x-max-length">>, long, 1}],
+ OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}),
+    %% If confirms are not enabled, publishes will still be dropped in reject-publish mode.
+ check_max_length_requeue(Config, QName, Ch, <<"1">>, <<"2">>).
+
+max_length_bytes_drop_publish(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QNameBytes = ?config(queue_name, Config),
+ MaxLengthBytesArgs = [{<<"x-max-length-bytes">>, long, 100}],
+ OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QNameBytes, arguments = MaxLengthBytesArgs ++ OverflowArgs ++ Args, durable = Durable}),
+
+ %% 80 bytes payload
+ Payload1 = << <<"1">> || _ <- lists:seq(1, 80) >>,
+ Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>,
+ Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>,
+
+ check_max_length_drops_publish(Config, QNameBytes, Ch, Payload1, Payload2, Payload3).
+
+%% -------------------------------------------------------------------
+%% Implementation
+%% -------------------------------------------------------------------
+
+check_max_length_requeue(Config, QName, Ch, Payload1, Payload2) ->
+ sync_mirrors(QName, Config),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ %% A single message is published and consumed
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ {#'basic.get_ok'{delivery_tag = DeliveryTag},
+ #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Another message is published
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload2}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+check_max_length_drops_publish(Config, QName, Ch, Payload1, Payload2, Payload3) ->
+ sync_mirrors(QName, Config),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ %% A single message is published and consumed
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Message 2 is dropped, message 1 stays
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Messages 2 and 3 are dropped, message 1 stays
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload3}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+check_max_length_rejects(Config, QName, Ch, Payload1, Payload2, Payload3) ->
+ sync_mirrors(QName, Config),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ flush(),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+    %% The first message can be enqueued and is acked
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ receive #'basic.ack'{} -> ok
+ after 1000 -> error(expected_ack)
+ end,
+
+    %% The queue is full, so this message cannot be enqueued and is nacked
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ receive #'basic.nack'{} -> ok
+ after 1000 -> error(expected_nack)
+ end,
+
+    %% Still full: this message cannot be enqueued either and is nacked
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload3}),
+ receive #'basic.nack'{} -> ok
+ after 1000 -> error(expected_nack)
+ end,
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Now we can publish message 2.
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ receive #'basic.ack'{} -> ok
+ after 1000 -> error(expected_ack)
+ end,
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload2}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+check_max_length_drops_head(Config, QName, Ch, Payload1, Payload2, Payload3) ->
+ sync_mirrors(QName, Config),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ %% A single message is published and consumed
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Message 1 is replaced by message 2
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload2}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Messages 1 and 2 are replaced
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload3}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload3}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+sync_mirrors(QName, Config) ->
+ case rabbit_ct_helpers:get_config(Config, is_mirrored) of
+ true ->
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]);
+ _ -> ok
+ end.
+
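+%% Drain anything already sitting in the test process mailbox, so that stale
+%% confirms from earlier publishes cannot satisfy later assertions.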
+flush() ->
+ receive _ -> flush()
+ after 10 -> ok
+ end.
diff --git a/deps/rabbit/test/queue_master_location_SUITE.erl b/deps/rabbit/test/queue_master_location_SUITE.erl
new file mode 100644
index 0000000000..fab3eac3f0
--- /dev/null
+++ b/deps/rabbit/test/queue_master_location_SUITE.erl
@@ -0,0 +1,487 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(queue_master_location_SUITE).
+
+%% These tests use an ABC cluster with each node initialised with
+%% a different number of queues. When a queue is declared, different
+%% strategies can be applied to determine the queue's master node. Queue
+%% location strategies can be applied in the following ways:
+%%   1. As a policy,
+%%   2. As config (in rabbitmq.config),
+%%   3. or as part of the queue's declare arguments.
+%%
+%% Currently supported strategies are:
+%%   min-masters : The queue master node is the node currently hosting
+%%                 the fewest queue masters in the cluster.
+%%   client-local: The queue master node is the local node from which
+%%                 the declaration is carried out.
+%% random : The queue master node is randomly selected.
+%%
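+%% As a rough illustration only (assumed classic-config syntax, not exercised
+%% by this suite), the config form of (2) maps onto the same application
+%% environment key that set_location_config/2 below sets:
+%%   [{rabbit, [{queue_master_locator, <<"min-masters">>}]}].
+%%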
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(DEFAULT_VHOST_PATH, (<<"/">>)).
+-define(POLICY, <<"^qm.location$">>).
+
+all() ->
+ [
+ {group, cluster_size_3},
+ {group, maintenance_mode}
+ ].
+
+groups() ->
+ [
+ {cluster_size_3, [], [
+ declare_args,
+ declare_policy,
+ declare_invalid_policy,
+ declare_policy_nodes,
+ declare_policy_all,
+ declare_policy_exactly,
+ declare_config,
+ calculate_min_master,
+ calculate_min_master_with_bindings,
+ calculate_random,
+ calculate_client_local
+ ]},
+
+ {maintenance_mode, [], [
+ declare_with_min_masters_and_some_nodes_under_maintenance,
+ declare_with_min_masters_and_all_nodes_under_maintenance,
+
+ declare_with_random_and_some_nodes_under_maintenance,
+ declare_with_random_and_all_nodes_under_maintenance
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ %% Replaced with a list of node names later
+ {rmq_nodes_count, 3}
+ ]);
+init_per_group(maintenance_mode, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Nodenames = [
+ list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+ || I <- lists:seq(1, ClusterSize)
+ ],
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, Nodenames},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ FFEnabled = case Group of
+ maintenance_mode ->
+ rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2,
+ maintenance_mode_status);
+ _ ->
+ ok
+ end,
+ case FFEnabled of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_testcase(Testcase, Config2),
+ Skip
+ end.
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+%%
+%% Queue 'declarations'
+%%
+
+declare_args(Config) ->
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ Args = [{<<"x-queue-master-locator">>, longstr, <<"min-masters">>}],
+ declare(Config, QueueName, false, false, Args, none),
+ verify_min_master(Config, Q).
+
+declare_policy(Config) ->
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ set_location_policy(Config, ?POLICY, <<"min-masters">>),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ declare(Config, QueueName, false, false, _Args=[], none),
+ verify_min_master(Config, Q).
+
+declare_invalid_policy(Config) ->
+    %% Tests that the queue-master-locator validation returns 'ok'; otherwise
+    %% validation of the other parameters might be skipped and an invalid
+    %% policy accepted.
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
+ {<<"ha-mode">>, <<"exactly">>},
+ %% this field is expected to be an integer
+ {<<"ha-params">>, <<"2">>}],
+ {error_string, _} = rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_policy, set,
+ [<<"/">>, ?POLICY, <<".*">>, Policy, 0, <<"queues">>, <<"acting-user">>]).
+
+declare_policy_nodes(Config) ->
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ % Note:
+ % Node0 has 15 queues, Node1 has 8 and Node2 has 1
+ Node0Name = rabbit_data_coercion:to_binary(
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)),
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ Node1Name = rabbit_data_coercion:to_binary(Node1),
+ Nodes = [Node1Name, Node0Name],
+ Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
+ {<<"ha-mode">>, <<"nodes">>},
+ {<<"ha-params">>, Nodes}],
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY,
+ <<".*">>, <<"queues">>, Policy),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ declare(Config, QueueName, false, false, _Args=[], none),
+ verify_min_master(Config, Q, Node1).
+
+declare_policy_all(Config) ->
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ % Note:
+ % Node0 has 15 queues, Node1 has 8 and Node2 has 1
+ Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
+ {<<"ha-mode">>, <<"all">>}],
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY,
+ <<".*">>, <<"queues">>, Policy),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ declare(Config, QueueName, false, false, _Args=[], none),
+ verify_min_master(Config, Q).
+
+declare_policy_exactly(Config) ->
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
+ {<<"ha-mode">>, <<"exactly">>},
+ {<<"ha-params">>, 2}],
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY,
+ <<".*">>, <<"queues">>, Policy),
+ QueueRes = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ declare(Config, QueueRes, false, false, _Args=[], none),
+
+ Node0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_broker_helpers:control_action(sync_queue, Node0,
+ [binary_to_list(Q)], [{"-p", "/"}]),
+ wait_for_sync(Config, Node0, QueueRes, 1),
+
+ {ok, Queue} = rabbit_ct_broker_helpers:rpc(Config, Node0,
+ rabbit_amqqueue, lookup, [QueueRes]),
+ {MNode0, [SNode], [SSNode]} = rabbit_ct_broker_helpers:rpc(Config, Node0,
+ rabbit_mirror_queue_misc,
+ actual_queue_nodes, [Queue]),
+ ?assertEqual(SNode, SSNode),
+ {ok, MNode1} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_queue_master_location_misc,
+ lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
+ ?assertEqual(MNode0, MNode1),
+ Node2 = rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename),
+ ?assertEqual(MNode1, Node2).
+
+declare_config(Config) ->
+ setup_test_environment(Config),
+ set_location_config(Config, <<"min-masters">>),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ declare(Config, QueueName, false, false, _Args = [], none),
+ verify_min_master(Config, Q),
+ unset_location_config(Config),
+ ok.
+
+%%
+%% Maintenance mode effects
+%%
+
+declare_with_min_masters_and_some_nodes_under_maintenance(Config) ->
+ set_location_policy(Config, ?POLICY, <<"min-masters">>),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 1),
+
+ QName = <<"qm.tests.min_masters.maintenance.case1">>,
+ Resource = rabbit_misc:r(<<"/">>, queue, QName),
+ Record = declare(Config, Resource, false, false, _Args = [], none),
+ %% the only node that's not being drained
+ ?assertEqual(rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename),
+ node(amqqueue:get_pid(Record))),
+
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 1).
+
+declare_with_min_masters_and_all_nodes_under_maintenance(Config) ->
+ declare_with_all_nodes_under_maintenance(Config, <<"min-masters">>).
+
+declare_with_random_and_some_nodes_under_maintenance(Config) ->
+ set_location_policy(Config, ?POLICY, <<"random">>),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 2),
+
+ QName = <<"qm.tests.random.maintenance.case1">>,
+ Resource = rabbit_misc:r(<<"/">>, queue, QName),
+ Record = declare(Config, Resource, false, false, _Args = [], none),
+ %% the only node that's not being drained
+ ?assertEqual(rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ node(amqqueue:get_pid(Record))),
+
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 2).
+
+declare_with_random_and_all_nodes_under_maintenance(Config) ->
+ declare_with_all_nodes_under_maintenance(Config, <<"random">>).
+
+declare_with_all_nodes_under_maintenance(Config, Locator) ->
+ set_location_policy(Config, ?POLICY, Locator),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 1),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 2),
+
+ QName = rabbit_data_coercion:to_binary(
+ rabbit_misc:format("qm.tests.~s.maintenance.case2", [Locator])),
+ Resource = rabbit_misc:r(<<"/">>, queue, QName),
+ Record = declare(Config, Resource, false, false, _Args = [], none),
+ %% when queue master locator returns no node, the node that handles
+ %% the declaration method will be used as a fallback
+ ?assertEqual(rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ node(amqqueue:get_pid(Record))),
+
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 1),
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 2).
+
+%%
+%% Test 'calculations'
+%%
+
+calculate_min_master(Config) ->
+ setup_test_environment(Config),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ Args = [{<<"x-queue-master-locator">>, longstr, <<"min-masters">>}],
+ declare(Config, QueueName, false, false, Args, none),
+ verify_min_master(Config, Q),
+ ok.
+
+calculate_min_master_with_bindings(Config) ->
+ setup_test_environment(Config),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test_bound">>),
+ Args = [{<<"x-queue-master-locator">>, longstr, <<"min-masters">>}],
+ declare(Config, QueueName, false, false, Args, none),
+ verify_min_master(Config, Q),
+ %% Add 20 bindings to this queue
+ [ bind(Config, QueueName, integer_to_binary(N)) || N <- lists:seq(1, 20) ],
+
+ QueueName1 = rabbit_misc:r(<<"/">>, queue, Q1 = <<"qm.test_unbound">>),
+ declare(Config, QueueName1, false, false, Args, none),
+    % Another queue should still land on the same node: bindings are
+    % not taken into account by the min-masters calculation
+ verify_min_master(Config, Q1),
+ ok.
+
+calculate_random(Config) ->
+ setup_test_environment(Config),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ Args = [{<<"x-queue-master-locator">>, longstr, <<"random">>}],
+ declare(Config, QueueName, false, false, Args, none),
+ verify_random(Config, Q),
+ ok.
+
+calculate_client_local(Config) ->
+ setup_test_environment(Config),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ Args = [{<<"x-queue-master-locator">>, longstr, <<"client-local">>}],
+ declare(Config, QueueName, false, false, Args, none),
+ verify_client_local(Config, Q),
+ ok.
+
+%%
+%% Setup environment
+%%
+
+setup_test_environment(Config) ->
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ [distribute_queues(Config, Node) || Node <- Nodes],
+ ok.
+
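+%% Declare a different number of queues on each node (15 on node 0, 8 on
+%% node 1, 1 on node 2) so that the min-masters strategy has an unambiguous
+%% winner.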
+distribute_queues(Config, Node) ->
+    ok = rpc:call(Node, application, unset_env, [rabbit, queue_master_locator]),
+ Count = case rabbit_ct_broker_helpers:nodename_to_index(Config, Node) of
+ 0 -> 15;
+ 1 -> 8;
+ 2 -> 1
+ end,
+
+ Channel = rabbit_ct_client_helpers:open_channel(Config, Node),
+ ok = declare_queues(Channel, declare_fun(), Count),
+ ok = create_e2e_binding(Channel, [<< "ex_1" >>, << "ex_2" >>]),
+ {ok, Channel}.
+
+%%
+%% Internal queue handling
+%%
+
+declare_queues(Channel, DeclareFun, 1) -> DeclareFun(Channel);
+declare_queues(Channel, DeclareFun, N) ->
+ DeclareFun(Channel),
+ declare_queues(Channel, DeclareFun, N-1).
+
+declare_exchange(Channel, Ex) ->
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = Ex}),
+ {ok, Ex}.
+
+declare_binding(Channel, Binding) ->
+ #'exchange.bind_ok'{} = amqp_channel:call(Channel, Binding),
+ ok.
+
+declare_fun() ->
+ fun(Channel) ->
+ #'queue.declare_ok'{} = amqp_channel:call(Channel, get_random_queue_declare()),
+ ok
+ end.
+
+create_e2e_binding(Channel, ExNamesBin) ->
+ [{ok, Ex1}, {ok, Ex2}] = [declare_exchange(Channel, Ex) || Ex <- ExNamesBin],
+ Binding = #'exchange.bind'{source = Ex1, destination = Ex2},
+ ok = declare_binding(Channel, Binding).
+
+get_random_queue_declare() ->
+ #'queue.declare'{passive = false,
+ durable = false,
+ exclusive = true,
+ auto_delete = false,
+ nowait = false,
+ arguments = []}.
+
+%%
+%% Internal helper functions
+%%
+
+get_cluster() -> [node()|nodes()].
+
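+%% distribute_queues/2 gives the highest-numbered node the fewest queues
+%% (the 15/8/1 split above), so the last node in the node list is the
+%% expected "min-masters" choice.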
+min_master_node(Config) ->
+ hd(lists:reverse(
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename))).
+
+set_location_config(Config, Strategy) ->
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ [ok = rabbit_ct_broker_helpers:rpc(Config, Node,
+ application, set_env,
+ [rabbit, queue_master_locator, Strategy]) || Node <- Nodes],
+ ok.
+
+unset_location_config(Config) ->
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ [ok = rabbit_ct_broker_helpers:rpc(Config, Node,
+ application, unset_env,
+ [rabbit, queue_master_locator]) || Node <- Nodes],
+ ok.
+
+declare(Config, QueueName, Durable, AutoDelete, Args0, Owner) ->
+ Args1 = [QueueName, Durable, AutoDelete, Args0, Owner, <<"acting-user">>],
+ case rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, declare, Args1) of
+ {new, Queue} -> Queue;
+ Other -> Other
+ end.
+
+bind(Config, QueueName, RoutingKey) ->
+ ExchangeName = rabbit_misc:r(QueueName, exchange, <<"amq.direct">>),
+
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_binding, add,
+ [#binding{source = ExchangeName,
+ destination = QueueName,
+ key = RoutingKey,
+ args = []},
+ <<"acting-user">>]).
+
+verify_min_master(Config, Q, MinMasterNode) ->
+ Rpc = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_queue_master_location_misc,
+ lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
+ ?assertEqual({ok, MinMasterNode}, Rpc).
+
+verify_min_master(Config, Q) ->
+ MinMaster = min_master_node(Config),
+ verify_min_master(Config, Q, MinMaster).
+
+verify_random(Config, Q) ->
+ [Node | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ {ok, Master} = rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_queue_master_location_misc,
+ lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
+ ?assert(lists:member(Master, Nodes)).
+
+verify_client_local(Config, Q) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Rpc = rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_queue_master_location_misc,
+ lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
+ ?assertEqual({ok, Node}, Rpc).
+
+set_location_policy(Config, Name, Strategy) ->
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
+ Name, <<".*">>, <<"queues">>, [{<<"queue-master-locator">>, Strategy}]).
+
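+%% Poll (100 ms apart, up to the given number of attempts) until the queue
+%% reports the expected number of synchronised mirrors, as checked by
+%% synced/4 below.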
+wait_for_sync(Config, Nodename, Q, ExpectedSSPidLen) ->
+ wait_for_sync(Config, Nodename, Q, ExpectedSSPidLen, 600).
+
+wait_for_sync(_, _, _, _, 0) ->
+ throw(sync_timeout);
+wait_for_sync(Config, Nodename, Q, ExpectedSSPidLen, N) ->
+ case synced(Config, Nodename, Q, ExpectedSSPidLen) of
+ true -> ok;
+ false -> timer:sleep(100),
+ wait_for_sync(Config, Nodename, Q, ExpectedSSPidLen, N-1)
+ end.
+
+synced(Config, Nodename, Q, ExpectedSSPidLen) ->
+ Args = [<<"/">>, [name, synchronised_slave_pids]],
+ Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+ rabbit_amqqueue, info_all, Args),
+ [SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info, Q =:= Q1],
+ length(SSPids) =:= ExpectedSSPidLen.
diff --git a/deps/rabbit/test/queue_parallel_SUITE.erl b/deps/rabbit/test/queue_parallel_SUITE.erl
new file mode 100644
index 0000000000..6f813512f4
--- /dev/null
+++ b/deps/rabbit/test/queue_parallel_SUITE.erl
@@ -0,0 +1,725 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+%%
+-module(queue_parallel_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+-import(quorum_queue_utils, [wait_for_messages/2]).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ AllTests = [publish,
+ consume,
+ consume_first_empty,
+ consume_from_empty_queue,
+ consume_and_autoack,
+ subscribe,
+ subscribe_consumers,
+ subscribe_with_autoack,
+ consume_and_ack,
+ consume_and_multiple_ack,
+ subscribe_and_ack,
+ subscribe_and_multiple_ack,
+ subscribe_and_requeue_multiple_nack,
+ subscribe_and_nack,
+ subscribe_and_requeue_nack,
+ subscribe_and_multiple_nack,
+ consume_and_requeue_nack,
+ consume_and_nack,
+ consume_and_requeue_multiple_nack,
+ consume_and_multiple_nack,
+ basic_cancel,
+ purge,
+ basic_recover,
+ delete_immediately_by_resource
+ ],
+ [
+ {parallel_tests, [],
+ [
+ {classic_queue, [parallel], AllTests ++ [delete_immediately_by_pid_succeeds,
+ trigger_message_store_compaction]},
+ {mirrored_queue, [parallel], AllTests ++ [delete_immediately_by_pid_succeeds,
+ trigger_message_store_compaction]},
+ {quorum_queue, [parallel], AllTests ++ [delete_immediately_by_pid_fails]},
+ {quorum_queue_in_memory_limit, [parallel], AllTests ++ [delete_immediately_by_pid_fails]},
+ {quorum_queue_in_memory_bytes, [parallel], AllTests ++ [delete_immediately_by_pid_fails]},
+ {stream_queue, [parallel], [publish,
+ subscribe]}
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 3}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(classic_queue, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {consumer_args, []},
+ {queue_durable, true}]);
+init_per_group(quorum_queue, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
+ {consumer_args, []},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(quorum_queue_in_memory_limit, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 1}]},
+ {consumer_args, []},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(quorum_queue_in_memory_bytes, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-bytes">>, long, 1}]},
+ {consumer_args, []},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(mirrored_queue, Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
+ <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [{is_mirrored, true},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {consumer_args, []},
+ {queue_durable, true}]),
+ rabbit_ct_helpers:run_steps(Config1, []);
+init_per_group(stream_queue, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, stream_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"stream">>}]},
+ {consumer_args, [{<<"x-stream-offset">>, long, 0}]},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(Group, Config0) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 3,
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config0, {rabbit, [{channel_tick_interval, 1000},
+ {quorum_tick_interval, 1000},
+ {stream_tick_interval, 1000}]}),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ rabbit_ct_helpers:run_steps(Config0, [])
+ end.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Q2 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_2", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, Q},
+ {queue_name_2, Q2}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name_2, Config)}),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+publish(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]).
+
+consume(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]).
+
+consume_first_empty(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ consume_empty(Ch, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume(Ch, QName, true, [<<"msg1">>]),
+ rabbit_ct_client_helpers:close_channel(Ch).
+
+consume_from_empty_queue(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ consume_empty(Ch, QName).
+
+consume_and_autoack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume(Ch, QName, true, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]).
+
+subscribe(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ %% Let's set consumer prefetch so it works with stream queues
+ ?assertMatch(#'basic.qos_ok'{},
+ amqp_channel:call(Ch, #'basic.qos'{global = false,
+ prefetch_count = 10})),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+
+ CArgs = ?config(consumer_args, Config),
+ subscribe(Ch, QName, false, CArgs),
+ receive_basic_deliver(false),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]).
+
+subscribe_consumers(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ CArgs = ?config(consumer_args, Config),
+ ?assertMatch(#'basic.qos_ok'{},
+ amqp_channel:call(Ch, #'basic.qos'{global = false,
+ prefetch_count = 10})),
+ subscribe(Ch, QName, false, CArgs),
+
+ %% validate we can retrieve the consumers
+ Consumers = rpc:call(Server, rabbit_amqqueue, consumers_all, [<<"/">>]),
+ [Consumer] = lists:filter(fun(Props) ->
+ Resource = proplists:get_value(queue_name, Props),
+ QName == Resource#resource.name
+ end, Consumers),
+ ?assert(is_pid(proplists:get_value(channel_pid, Consumer))),
+ ?assert(is_binary(proplists:get_value(consumer_tag, Consumer))),
+ ?assertEqual(true, proplists:get_value(ack_required, Consumer)),
+ ?assertEqual(10, proplists:get_value(prefetch_count, Consumer)),
+ ?assertEqual([], proplists:get_value(arguments, Consumer)),
+
+ rabbit_ct_client_helpers:close_channel(Ch).
+
+subscribe_with_autoack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ subscribe(Ch, QName, true, CArgs),
+ receive_basic_deliver(false),
+ receive_basic_deliver(false),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]).
+
+consume_and_ack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DeliveryTag] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consume_and_multiple_ack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [_, _, DeliveryTag] = consume(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = true}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_ack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_multiple_ack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive_basic_deliver(false),
+ receive_basic_deliver(false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = true}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+trigger_message_store_compaction(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ N = 12000,
+ [publish(Ch, QName, [binary:copy(<<"a">>, 5000)]) || _ <- lists:seq(1, N)],
+ wait_for_messages(Config, [[QName, <<"12000">>, <<"12000">>, <<"0">>]]),
+
+ AllDTags = rabbit_ct_client_helpers:consume_without_acknowledging(Ch, QName, N),
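+    %% acking only a middle slice of the deliveries leaves holes in the message
+    %% store segments, which is what should make compaction worth triggering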
+ ToAck = lists:filter(fun (I) -> I > 500 andalso I < 11200 end, AllDTags),
+
+ [amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = Tag,
+ multiple = false}) || Tag <- ToAck],
+
+    %% give compaction a moment to kick in and finish
+ timer:sleep(5000),
+ amqp_channel:cast(Ch, #'queue.purge'{queue = QName}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_requeue_multiple_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive_basic_deliver(false),
+ receive_basic_deliver(false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = true,
+ requeue = true}),
+ receive_basic_deliver(true),
+ receive_basic_deliver(true),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true}, _} ->
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag1,
+ multiple = true}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consume_and_requeue_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ [DeliveryTag] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"1">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true}),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consume_and_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DeliveryTag] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consume_and_requeue_multiple_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [_, _, DeliveryTag] = consume(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = true,
+ requeue = true}),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consume_and_multiple_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [_, _, DeliveryTag] = consume(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = true,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_requeue_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true}),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true}, _} ->
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag1}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_multiple_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive_basic_deliver(false),
+ receive_basic_deliver(false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = true,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+%% TODO: test with a single active consumer
+basic_cancel(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ CTag = atom_to_binary(?FUNCTION_NAME, utf8),
+
+ subscribe(Ch, QName, false, CTag, CArgs),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}),
+ Consumers = rpc:call(Server, rabbit_amqqueue, consumers_all, [<<"/">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ ?assertEqual([], lists:filter(fun(Props) ->
+ Resource = proplists:get_value(queue_name, Props),
+ QName == Resource#resource.name
+ end, Consumers)),
+ publish(Ch, QName, [<<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"2">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag}),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]])
+ after 5000 ->
+ exit(basic_deliver_timeout)
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+purge(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ [_] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"1">>, <<"1">>]]),
+ {'queue.purge_ok', 1} = amqp_channel:call(Ch, #'queue.purge'{queue = QName}),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+basic_recover(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [_] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.recover'{requeue = true}),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+delete_immediately_by_pid_fails(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
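+    %% delete the queue process directly by pid via rabbitmqctl eval; here the
+    %% call is expected to be refused, so the passive re-declare below must succeed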
+ Cmd = ["eval", "{ok, Q} = rabbit_amqqueue:lookup(rabbit_misc:r(<<\"/\">>, queue, <<\"" ++ binary_to_list(QName) ++ "\">>)), Pid = rabbit_amqqueue:pid_of(Q), rabbit_amqqueue:delete_immediately([Pid])."],
+ {ok, Msg} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd),
+ ?assertEqual(match, re:run(Msg, ".*error.*", [{capture, none}])),
+
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = Durable,
+ passive = true,
+ auto_delete = false,
+ arguments = Args})),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+delete_immediately_by_pid_succeeds(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ Cmd = ["eval", "{ok, Q} = rabbit_amqqueue:lookup(rabbit_misc:r(<<\"/\">>, queue, <<\"" ++ binary_to_list(QName) ++ "\">>)), Pid = rabbit_amqqueue:pid_of(Q), rabbit_amqqueue:delete_immediately([Pid])."],
+ {ok, Msg} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd),
+ ?assertEqual(match, re:run(Msg, ".*ok.*", [{capture, none}])),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = Durable,
+ passive = true,
+ auto_delete = false,
+ arguments = Args})),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+delete_immediately_by_resource(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ Cmd = ["eval", "rabbit_amqqueue:delete_immediately_by_resource([rabbit_misc:r(<<\"/\">>, queue, <<\"" ++ binary_to_list(QName) ++ "\">>)])."],
+ ?assertEqual({ok, "ok\n"}, rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd)),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = Durable,
+ passive = true,
+ auto_delete = false,
+ arguments = Args})),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+%% Test helpers
+%%%%%%%%%%%%%%%%%%%%%%%%
+declare_queue(Ch, Config, QName) ->
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = Durable}).
+
+publish(Ch, QName, Payloads) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload})
+ || Payload <- Payloads].
+
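+%% basic.get one message per expected payload; the bound generator variable also
+%% asserts that each fetched payload matches, and the delivery tags are returned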
+consume(Ch, QName, Payloads) ->
+ consume(Ch, QName, false, Payloads).
+
+consume(Ch, QName, NoAck, Payloads) ->
+ [begin
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = Payload}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName,
+ no_ack = NoAck}),
+ DTag
+ end || Payload <- Payloads].
+
+consume_empty(Ch, QName) ->
+ ?assertMatch(#'basic.get_empty'{},
+ amqp_channel:call(Ch, #'basic.get'{queue = QName})).
+
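+%% starts a consumer and blocks until basic.consume_ok arrives (no timeout)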
+subscribe(Ch, Queue, NoAck, CArgs) ->
+ subscribe(Ch, Queue, NoAck, <<"ctag">>, CArgs).
+
+subscribe(Ch, Queue, NoAck, Ctag, CArgs) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+ no_ack = NoAck,
+ consumer_tag = Ctag,
+ arguments = CArgs},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = Ctag} ->
+ ok
+ end.
+
+receive_basic_deliver(Redelivered) ->
+ receive
+ {#'basic.deliver'{redelivered = R}, _} when R == Redelivered ->
+ ok
+ end.
+
+flush(T) ->
+ receive X ->
+ ct:pal("flushed ~w", [X]),
+ flush(T)
+ after T ->
+ ok
+ end.
diff --git a/deps/rabbit/test/queue_type_SUITE.erl b/deps/rabbit/test/queue_type_SUITE.erl
new file mode 100644
index 0000000000..aed5ad4ccb
--- /dev/null
+++ b/deps/rabbit/test/queue_type_SUITE.erl
@@ -0,0 +1,275 @@
+-module(queue_type_SUITE).
+
+-compile(export_all).
+
+-export([]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, classic},
+ {group, quorum}
+ ].
+
+
+all_tests() ->
+ [
+ smoke,
+ ack_after_queue_delete
+ ].
+
+groups() ->
+ [
+ {classic, [], all_tests()},
+ {quorum, [], all_tests()}
+ ].
+
+init_per_suite(Config0) ->
+ rabbit_ct_helpers:log_environment(),
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config0, {rabbit, [{quorum_tick_interval, 1000}]}),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config),
+ ok.
+
+init_per_group(Group, Config) ->
+ ClusterSize = 3,
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodes_count, ClusterSize},
+ {rmq_nodename_suffix, Group},
+ {tcp_ports_base}]),
+ Config1b = rabbit_ct_helpers:set_config(Config1,
+ [{queue_type, atom_to_binary(Group, utf8)},
+ {net_ticktime, 10}]),
+ Config2 = rabbit_ct_helpers:run_steps(Config1b,
+ [fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps()),
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config2, quorum_queue) of
+ ok ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config2, 0, application, set_env,
+ [rabbit, channel_tick_interval, 100]),
+            %% HACK: the larger cluster sizes benefit from a bit more time
+            %% after clustering before running the tests.
+ Config3 = case Group of
+ cluster_size_5 ->
+ timer:sleep(5000),
+ Config2;
+ _ ->
+ Config2
+ end,
+
+ rabbit_ct_broker_helpers:set_policy(
+ Config3, 0,
+ <<"ha-policy">>, <<".*">>, <<"queues">>,
+ [{<<"ha-mode">>, <<"all">>}]),
+ Config3;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end.
+
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit,
+ [{core_metrics_gc_interval, 100},
+ {log, [{file, [{level, debug}]}]}]}),
+ {ra, [{min_wal_roll_over_interval, 30000}]}).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []),
+ Q = rabbit_data_coercion:to_binary(Testcase),
+ Config2 = rabbit_ct_helpers:set_config(Config1,
+ [{queue_name, Q},
+ {alt_queue_name, <<Q/binary, "_alt">>}
+ ]),
+ rabbit_ct_helpers:run_steps(Config2,
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ catch delete_queues(),
+ Config1 = rabbit_ct_helpers:run_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+smoke(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QName = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ declare(Ch, QName, [{<<"x-queue-type">>, longstr,
+ ?config(queue_type, Config)}])),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
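+    %% publisher confirms: the channel forwards basic.ack/basic.nack to self()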
+ publish(Ch, QName, <<"msg1">>),
+ ct:pal("waiting for confirms from ~s", [QName]),
+ ok = receive
+ #'basic.ack'{} -> ok;
+ #'basic.nack'{} -> fail
+ after 2500 ->
+ flush(),
+ exit(confirm_timeout)
+ end,
+ DTag = basic_get(Ch, QName),
+
+ basic_ack(Ch, DTag),
+ basic_get_empty(Ch, QName),
+
+ %% consume
+ publish(Ch, QName, <<"msg2">>),
+ ConsumerTag1 = <<"ctag1">>,
+ ok = subscribe(Ch, QName, ConsumerTag1),
+ %% receive and ack
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{}} ->
+ basic_ack(Ch, DeliveryTag)
+ after 5000 ->
+ flush(),
+ exit(basic_deliver_timeout)
+ end,
+ basic_cancel(Ch, ConsumerTag1),
+
+ %% assert empty
+ basic_get_empty(Ch, QName),
+
+ %% consume and nack
+ ConsumerTag2 = <<"ctag2">>,
+ ok = subscribe(Ch, QName, ConsumerTag2),
+ publish(Ch, QName, <<"msg3">>),
+ receive
+ {#'basic.deliver'{delivery_tag = T,
+ redelivered = false},
+ #amqp_msg{}} ->
+ basic_cancel(Ch, ConsumerTag2),
+ basic_nack(Ch, T)
+ after 5000 ->
+ exit(basic_deliver_timeout)
+ end,
+ %% get and ack
+ basic_ack(Ch, basic_get(Ch, QName)),
+ ok.
+
+ack_after_queue_delete(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QName = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ declare(Ch, QName, [{<<"x-queue-type">>, longstr,
+ ?config(queue_type, Config)}])),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, <<"msg1">>),
+ ct:pal("waiting for confirms from ~s", [QName]),
+ ok = receive
+ #'basic.ack'{} -> ok;
+ #'basic.nack'{} ->
+ ct:fail("confirm nack - expected ack")
+ after 2500 ->
+ flush(),
+ exit(confirm_timeout)
+ end,
+
+ DTag = basic_get(Ch, QName),
+
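+    %% monitor the channel so an error triggered by the late ack would show up
+    %% as a 'DOWN' message below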
+ ChRef = erlang:monitor(process, Ch),
+ #'queue.delete_ok'{} = delete(Ch, QName),
+
+ basic_ack(Ch, DTag),
+ %% assert no channel error
+ receive
+ {'DOWN', ChRef, process, _, _} ->
+ ct:fail("unexpected channel closure")
+ after 1000 ->
+ ok
+ end,
+ flush(),
+ ok.
+
+%% Utility
+%%
+delete_queues() ->
+ [rabbit_amqqueue:delete(Q, false, false, <<"dummy">>)
+ || Q <- rabbit_amqqueue:list()].
+
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ auto_delete = false,
+ arguments = Args}).
+
+delete(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+publish(Ch, Queue, Msg) ->
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Msg}).
+
+basic_get(Ch, Queue) ->
+ {GetOk, _} = Reply = amqp_channel:call(Ch, #'basic.get'{queue = Queue,
+ no_ack = false}),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{}}, Reply),
+ GetOk#'basic.get_ok'.delivery_tag.
+
+basic_get_empty(Ch, Queue) ->
+ ?assertMatch(#'basic.get_empty'{},
+ amqp_channel:call(Ch, #'basic.get'{queue = Queue,
+ no_ack = false})).
+
+subscribe(Ch, Queue, CTag) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+ no_ack = false,
+ consumer_tag = CTag},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = CTag} ->
+ ok
+ after 5000 ->
+ exit(basic_consume_timeout)
+ end.
+
+basic_ack(Ch, DTag) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag,
+ multiple = false}).
+
+basic_cancel(Ch, CTag) ->
+ #'basic.cancel_ok'{} =
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}).
+
+basic_nack(Ch, DTag) ->
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+ requeue = true,
+ multiple = false}).
+
+flush() ->
+ receive
+ Any ->
+ ct:pal("flush ~p", [Any]),
+ flush()
+ after 0 ->
+ ok
+ end.
diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl
new file mode 100644
index 0000000000..36a6d41a61
--- /dev/null
+++ b/deps/rabbit/test/quorum_queue_SUITE.erl
@@ -0,0 +1,2792 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(quorum_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(quorum_queue_utils, [wait_for_messages_ready/3,
+ wait_for_messages_pending_ack/3,
+ wait_for_messages_total/3,
+ wait_for_messages/2,
+ dirty_query/3,
+ ra_name/1,
+ is_mixed_versions/0]).
+
+-compile(export_all).
+
+suite() ->
+ [{timetrap, 5 * 60000}].
+
+all() ->
+ [
+ {group, single_node},
+ {group, unclustered},
+ {group, clustered}
+ ].
+
+groups() ->
+ [
+ {single_node, [], all_tests()},
+ {single_node, [], memory_tests()},
+ {single_node, [], [node_removal_is_quorum_critical]},
+ {unclustered, [], [
+ {cluster_size_2, [], [add_member]}
+ ]},
+ {clustered, [], [
+ {cluster_size_2, [], [cleanup_data_dir]},
+ {cluster_size_2, [], [add_member_not_running,
+ add_member_classic,
+ add_member_already_a_member,
+ add_member_not_found,
+ delete_member_not_running,
+ delete_member_classic,
+ delete_member_queue_not_found,
+ delete_member,
+ delete_member_not_a_member,
+ node_removal_is_quorum_critical]
+ ++ all_tests()},
+ {cluster_size_2, [], memory_tests()},
+ {cluster_size_3, [], [
+ declare_during_node_down,
+ simple_confirm_availability_on_leader_change,
+ publishing_to_unavailable_queue,
+ confirm_availability_on_leader_change,
+ recover_from_single_failure,
+ recover_from_multiple_failures,
+ leadership_takeover,
+ delete_declare,
+ delete_member_during_node_down,
+ metrics_cleanup_on_leadership_takeover,
+ metrics_cleanup_on_leader_crash,
+ consume_in_minority,
+ shrink_all,
+ rebalance,
+ file_handle_reservations,
+ file_handle_reservations_above_limit,
+ node_removal_is_not_quorum_critical
+ ]},
+ {cluster_size_5, [], [start_queue,
+ start_queue_concurrent,
+ quorum_cluster_size_3,
+ quorum_cluster_size_7,
+ node_removal_is_not_quorum_critical
+ ]},
+ {clustered_with_partitions, [], [
+ reconnect_consumer_and_publish,
+ reconnect_consumer_and_wait,
+ reconnect_consumer_and_wait_channel_down
+ ]}
+ ]}
+ ].
+
+all_tests() ->
+ [
+ declare_args,
+ declare_invalid_properties,
+ declare_server_named,
+ start_queue,
+ stop_queue,
+ restart_queue,
+ restart_all_types,
+ stop_start_rabbit_app,
+ publish_and_restart,
+ subscribe_should_fail_when_global_qos_true,
+ dead_letter_to_classic_queue,
+ dead_letter_with_memory_limit,
+ dead_letter_to_quorum_queue,
+ dead_letter_from_classic_to_quorum_queue,
+ dead_letter_policy,
+ cleanup_queue_state_on_channel_after_publish,
+ cleanup_queue_state_on_channel_after_subscribe,
+ sync_queue,
+ cancel_sync_queue,
+ idempotent_recover,
+ vhost_with_quorum_queue_is_deleted,
+ delete_immediately_by_resource,
+ consume_redelivery_count,
+ subscribe_redelivery_count,
+ message_bytes_metrics,
+ queue_length_limit_drop_head,
+ queue_length_limit_reject_publish,
+ subscribe_redelivery_limit,
+ subscribe_redelivery_policy,
+ subscribe_redelivery_limit_with_dead_letter,
+ queue_length_in_memory_limit_basic_get,
+ queue_length_in_memory_limit_subscribe,
+ queue_length_in_memory_limit,
+ queue_length_in_memory_limit_returns,
+ queue_length_in_memory_bytes_limit_basic_get,
+ queue_length_in_memory_bytes_limit_subscribe,
+ queue_length_in_memory_bytes_limit,
+ queue_length_in_memory_purge,
+ in_memory,
+ consumer_metrics,
+ invalid_policy,
+ delete_if_empty,
+ delete_if_unused,
+ queue_ttl,
+ peek,
+ consumer_priorities
+ ].
+
+memory_tests() ->
+ [
+ memory_alarm_rolls_wal
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config0) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:merge_app_env(
+ Config0, {rabbit, [{quorum_tick_interval, 1000}]}),
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config1, {aten, [{poll_interval, 1000}]}),
+ rabbit_ct_helpers:run_setup_steps(
+ Config,
+ [fun rabbit_ct_broker_helpers:configure_dist_proxy/1]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(clustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(unclustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(clustered_with_partitions, Config) ->
+ case is_mixed_versions() of
+ true ->
+ {skip, "clustered_with_partitions is too unreliable in mixed mode"};
+ false ->
+ rabbit_ct_helpers:set_config(Config, [{net_ticktime, 10}])
+ end;
+init_per_group(Group, Config) ->
+ ClusterSize = case Group of
+ single_node -> 1;
+ cluster_size_2 -> 2;
+ cluster_size_3 -> 3;
+ cluster_size_5 -> 5
+ end,
+ IsMixed = not (false == os:getenv("SECONDARY_UMBRELLA")),
+ case ClusterSize of
+ 2 when IsMixed ->
+ {skip, "cluster size 2 isn't mixed versions compatible"};
+ _ ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodes_count, ClusterSize},
+ {rmq_nodename_suffix, Group},
+ {tcp_ports_base}]),
+ Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]),
+ Ret = rabbit_ct_helpers:run_steps(Config1b,
+ [fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps()),
+ case Ret of
+ {skip, _} ->
+ Ret;
+ Config2 ->
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, quorum_queue),
+ case EnableFF of
+ ok ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config2, 0, application, set_env,
+ [rabbit, channel_tick_interval, 100]),
+                            %% HACK: the larger cluster sizes benefit from a bit
+                            %% more time after clustering before running the
+                            %% tests.
+ case Group of
+ cluster_size_5 ->
+ timer:sleep(5000),
+ Config2;
+ _ ->
+ Config2
+ end;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end
+ end
+ end.
+
+end_per_group(clustered, Config) ->
+ Config;
+end_per_group(unclustered, Config) ->
+ Config;
+end_per_group(clustered_with_partitions, Config) ->
+ Config;
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) when Testcase == reconnect_consumer_and_publish;
+ Testcase == reconnect_consumer_and_wait;
+ Testcase == reconnect_consumer_and_wait_channel_down ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Q = rabbit_data_coercion:to_binary(Testcase),
+ Config2 = rabbit_ct_helpers:set_config(Config1,
+ [{rmq_nodes_count, 3},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base},
+ {queue_name, Q},
+ {alt_queue_name, <<Q/binary, "_alt">>}
+ ]),
+ Ret = rabbit_ct_helpers:run_steps(
+ Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case Ret of
+ {skip, _} ->
+ Ret;
+ Config3 ->
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config3, quorum_queue),
+ case EnableFF of
+ ok ->
+ Config3;
+ Skip ->
+ end_per_testcase(Testcase, Config3),
+ Skip
+ end
+ end;
+init_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []),
+ Q = rabbit_data_coercion:to_binary(Testcase),
+ Config2 = rabbit_ct_helpers:set_config(Config1,
+ [{queue_name, Q},
+ {alt_queue_name, <<Q/binary, "_alt">>}
+ ]),
+ rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()).
+
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [{core_metrics_gc_interval, 100}]}),
+ {ra, [{min_wal_roll_over_interval, 30000}]}).
+
+end_per_testcase(Testcase, Config) when Testcase == reconnect_consumer_and_publish;
+ Testcase == reconnect_consumer_and_wait;
+ Testcase == reconnect_consumer_and_wait_channel_down ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase);
+end_per_testcase(Testcase, Config) ->
+ catch delete_queues(),
+ Config1 = rabbit_ct_helpers:run_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+declare_args(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ LQ = ?config(queue_name, Config),
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-length">>, long, 2000},
+ {<<"x-max-length-bytes">>, long, 2000}]),
+ assert_queue_type(Server, LQ, rabbit_quorum_queue),
+
+ DQ = <<"classic-declare-args-q">>,
+ declare(Ch, DQ, [{<<"x-queue-type">>, longstr, <<"classic">>}]),
+ assert_queue_type(Server, DQ, rabbit_classic_queue),
+
+ DQ2 = <<"classic-q2">>,
+ declare(Ch, DQ2),
+ assert_queue_type(Server, DQ2, rabbit_classic_queue).
+
+declare_invalid_properties(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ LQ = ?config(queue_name, Config),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = LQ,
+ auto_delete = true,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]})),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = LQ,
+ exclusive = true,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]})),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = LQ,
+ durable = false,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]})).
+
+declare_server_named(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ declare(rabbit_ct_client_helpers:open_channel(Config, Server),
+ <<"">>, [{<<"x-queue-type">>, longstr, <<"quorum">>}])).
+
+start_queue(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ LQ = ?config(queue_name, Config),
+    %% The stream coordinator is also a ra process, so we need to ensure the quorum
+    %% tests are not affected by any other ra cluster that could be added in the future
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% Check that the application and one ra node are up
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ Expected = Children + 1,
+ ?assertMatch(Expected,
+ length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))),
+
+ %% Test declare an existing queue
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% Test declare with same arguments
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% Test declare an existing queue with different arguments
+ ?assertExit(_, declare(Ch, LQ, [])),
+
+ %% Check that the application and process are still up
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ ?assertMatch(Expected,
+ length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))).
+
+start_queue_concurrent(Config) ->
+ Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ LQ = ?config(queue_name, Config),
+ Self = self(),
+ [begin
+ _ = spawn_link(fun () ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, Server),
+ %% Test declare an existing queue
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ,
+ [{<<"x-queue-type">>,
+ longstr,
+ <<"quorum">>}])),
+ Self ! {done, Server}
+ end)
+ end || Server <- Servers],
+
+ [begin
+ receive {done, Server} -> ok
+ after 5000 -> exit({await_done_timeout, Server})
+ end
+ end || Server <- Servers],
+
+ ok.
+
+quorum_cluster_size_3(Config) ->
+ case is_mixed_versions() of
+ true ->
+            {skip, "quorum_cluster_size_3 isn't reliable in mixed version mode"};
+ false ->
+ quorum_cluster_size_x(Config, 3, 3)
+ end.
+
+quorum_cluster_size_7(Config) ->
+ quorum_cluster_size_x(Config, 7, 5).
+
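+%% declares a queue asking for an initial group size of Max; the effective size
+%% appears to be capped by the number of cluster nodes, hence Expected members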
+quorum_cluster_size_x(Config, Max, Expected) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ RaName = ra_name(QQ),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-quorum-initial-group-size">>, long, Max}])),
+ {ok, Members, _} = ra:members({RaName, Server}),
+ ?assertEqual(Expected, length(Members)),
+ Info = rpc:call(Server, rabbit_quorum_queue, infos,
+ [rabbit_misc:r(<<"/">>, queue, QQ)]),
+ MembersQ = proplists:get_value(members, Info),
+ ?assertEqual(Expected, length(MembersQ)).
+
+stop_queue(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+    %% The stream coordinator is also a ra process, so we need to ensure the quorum
+    %% tests are not affected by any other ra cluster that could be added in the future
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ LQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% Check that the application and one ra node are up
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ Expected = Children + 1,
+ ?assertMatch(Expected,
+ length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))),
+
+ %% Delete the quorum queue
+ ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch, #'queue.delete'{queue = LQ})),
+ %% Check that the application and process are down
+ wait_until(fun() ->
+ Children == length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))
+ end),
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))).
+
+restart_queue(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+    %% The stream coordinator is also a ra process, so we need to ensure the quorum
+    %% tests are not affected by any other ra cluster that could be added in the future
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ LQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server),
+
+ %% Check that the application and one ra node are up
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ Expected = Children + 1,
+ ?assertMatch(Expected,
+ length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))),
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ delete_queues(Ch2, [LQ]).
+
+idempotent_recover(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ LQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% kill default vhost to trigger recovery
+ [{_, SupWrapperPid, _, _} | _] = rpc:call(Server, supervisor,
+ which_children,
+ [rabbit_vhost_sup_sup]),
+ [{_, Pid, _, _} | _] = rpc:call(Server, supervisor,
+ which_children,
+ [SupWrapperPid]),
+ %% kill the vhost process to trigger recover
+ rpc:call(Server, erlang, exit, [Pid, kill]),
+
+ timer:sleep(1000),
+ %% validate quorum queue is still functional
+ RaName = ra_name(LQ),
+ {ok, _, _} = ra:members({RaName, Server}),
+ %% validate vhosts are running - or rather validate that at least one
+ %% vhost per cluster is running
+ [begin
+ #{cluster_state := ServerStatuses} = maps:from_list(I),
+ ?assertMatch(#{Server := running}, maps:from_list(ServerStatuses))
+     end || I <- rpc:call(Server, rabbit_vhost, info_all, [])],
+ ok.
+
+vhost_with_quorum_queue_is_deleted(Config) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ VHost = <<"vhost2">>,
+ QName = atom_to_binary(?FUNCTION_NAME, utf8),
+ RaName = binary_to_atom(<<VHost/binary, "_", QName/binary>>, utf8),
+ User = ?config(rmq_username, Config),
+ ok = rabbit_ct_broker_helpers:add_vhost(Config, Node, VHost, User),
+ ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, VHost),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, Node,
+ VHost),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ declare(Ch, QName, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ UId = rpc:call(Node, ra_directory, where_is, [RaName]),
+ ?assert(UId =/= undefined),
+ ok = rabbit_ct_broker_helpers:delete_vhost(Config, VHost),
+ %% validate quorum queues got deleted
+ undefined = rpc:call(Node, ra_directory, where_is, [RaName]),
+ ok.
+
+restart_all_types(Config) ->
+ %% Test the node restart with both types of queues (quorum and classic) to
+ %% ensure there are no regressions
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+    %% The stream coordinator is also a ra process, so we need to ensure the quorum
+    %% tests are not affected by any other ra cluster that could be added in the future
+ Children = rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ1 = <<"restart_all_types-qq1">>,
+ ?assertEqual({'queue.declare_ok', QQ1, 0, 0},
+ declare(Ch, QQ1, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ QQ2 = <<"restart_all_types-qq2">>,
+ ?assertEqual({'queue.declare_ok', QQ2, 0, 0},
+ declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ CQ1 = <<"restart_all_types-classic1">>,
+ ?assertEqual({'queue.declare_ok', CQ1, 0, 0}, declare(Ch, CQ1, [])),
+ rabbit_ct_client_helpers:publish(Ch, CQ1, 1),
+ CQ2 = <<"restart_all_types-classic2">>,
+ ?assertEqual({'queue.declare_ok', CQ2, 0, 0}, declare(Ch, CQ2, [])),
+ rabbit_ct_client_helpers:publish(Ch, CQ2, 1),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server),
+
+ %% Check that the application and two ra nodes are up. Queues are restored
+ %% after the broker is marked as "ready", that's why we need to wait for
+ %% the condition.
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ Expected = length(Children) + 2,
+ ok = rabbit_ct_helpers:await_condition(
+ fun() ->
+ Expected =:= length(
+ rpc:call(
+ Server,
+ supervisor,
+ which_children,
+ [ra_server_sup_sup]))
+ end, 60000),
+ %% Check the classic queues restarted correctly
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ {#'basic.get_ok'{}, #amqp_msg{}} =
+ amqp_channel:call(Ch2, #'basic.get'{queue = CQ1, no_ack = false}),
+ {#'basic.get_ok'{}, #amqp_msg{}} =
+ amqp_channel:call(Ch2, #'basic.get'{queue = CQ2, no_ack = false}),
+ delete_queues(Ch2, [QQ1, QQ2, CQ1, CQ2]).
+
+delete_queues(Ch, Queues) ->
+ [amqp_channel:call(Ch, #'queue.delete'{queue = Q}) || Q <- Queues],
+ ok.
+
+stop_start_rabbit_app(Config) ->
+ %% Test start/stop of rabbit app with both types of queues (quorum and
+ %% classic) to ensure there are no regressions
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+    %% The stream coordinator is also a ra process, so we need to ensure the quorum
+    %% tests are not affected by any other ra cluster that could be added in the future
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ1 = <<"stop_start_rabbit_app-qq">>,
+ ?assertEqual({'queue.declare_ok', QQ1, 0, 0},
+ declare(Ch, QQ1, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ QQ2 = <<"quorum-q2">>,
+ ?assertEqual({'queue.declare_ok', QQ2, 0, 0},
+ declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ CQ1 = <<"stop_start_rabbit_app-classic">>,
+ ?assertEqual({'queue.declare_ok', CQ1, 0, 0}, declare(Ch, CQ1, [])),
+ rabbit_ct_client_helpers:publish(Ch, CQ1, 1),
+ CQ2 = <<"stop_start_rabbit_app-classic2">>,
+ ?assertEqual({'queue.declare_ok', CQ2, 0, 0}, declare(Ch, CQ2, [])),
+ rabbit_ct_client_helpers:publish(Ch, CQ2, 1),
+
+ rabbit_control_helper:command(stop_app, Server),
+ %% Check the ra application has stopped (thus its supervisor and queues)
+ ?assertMatch(false, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+
+ rabbit_control_helper:command(start_app, Server),
+
+ %% Check that the application and two ra nodes are up
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ Expected = Children + 2,
+ ?assertMatch(Expected,
+ length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))),
+ %% Check the classic queues restarted correctly
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ {#'basic.get_ok'{}, #amqp_msg{}} =
+ amqp_channel:call(Ch2, #'basic.get'{queue = CQ1, no_ack = false}),
+ {#'basic.get_ok'{}, #amqp_msg{}} =
+ amqp_channel:call(Ch2, #'basic.get'{queue = CQ2, no_ack = false}),
+ delete_queues(Ch2, [QQ1, QQ2, CQ1, CQ2]).
+
+publish_confirm(Ch, QName) ->
+ publish(Ch, QName),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ ct:pal("waiting for confirms from ~s", [QName]),
+ receive
+ #'basic.ack'{} ->
+ ct:pal("CONFIRMED! ~s", [QName]),
+ ok;
+ #'basic.nack'{} ->
+ ct:pal("NOT CONFIRMED! ~s", [QName]),
+ fail
+ after 2500 ->
+ exit(confirm_timeout)
+ end.
+
+publish_and_restart(Config) ->
+ %% Test the node restart with both types of queues (quorum and classic) to
+ %% ensure there are no regressions
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ RaName = ra_name(QQ),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server),
+
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ publish(rabbit_ct_client_helpers:open_channel(Config, Server), QQ),
+ wait_for_messages_ready(Servers, RaName, 2),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+consume_in_minority(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+
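+    %% with two of the three nodes down the queue cannot reach quorum, so the get
+    %% is expected to be rejected with a 541 server-initiated close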
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false})),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ ok.
+
+shrink_all(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ QQ = ?config(queue_name, Config),
+ AQ = ?config(alt_queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', AQ, 0, 0},
+ declare(Ch, AQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(500),
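+    %% shrink_all/1 removes the given node from the membership of every quorum
+    %% queue; the {ok, N} results appear to carry the remaining member counts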
+ Result = rpc:call(Server0, rabbit_quorum_queue, shrink_all, [Server2]),
+ ?assertMatch([{_, {ok, 2}}, {_, {ok, 2}}], Result),
+ Result1 = rpc:call(Server0, rabbit_quorum_queue, shrink_all, [Server1]),
+ ?assertMatch([{_, {ok, 1}}, {_, {ok, 1}}], Result1),
+ Result2 = rpc:call(Server0, rabbit_quorum_queue, shrink_all, [Server0]),
+ ?assertMatch([{_, {error, 1, last_node}},
+ {_, {error, 1, last_node}}], Result2),
+ ok.
+
+rebalance(Config) ->
+ case is_mixed_versions() of
+ true ->
+            {skip, "rebalance test isn't mixed version compatible"};
+ false ->
+ rebalance0(Config)
+ end.
+
+rebalance0(Config) ->
+ [Server0, _, _] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ Q3 = <<"q3">>,
+ Q4 = <<"q4">>,
+ Q5 = <<"q5">>,
+
+ ?assertEqual({'queue.declare_ok', Q1, 0, 0},
+ declare(Ch, Q1, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', Q2, 0, 0},
+ declare(Ch, Q2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(1000),
+
+ {ok, _, {_, Leader1}} = ra:members({ra_name(Q1), Server0}),
+ {ok, _, {_, Leader2}} = ra:members({ra_name(Q2), Server0}),
+ rabbit_ct_client_helpers:publish(Ch, Q1, 3),
+ rabbit_ct_client_helpers:publish(Ch, Q2, 2),
+
+ ?assertEqual({'queue.declare_ok', Q3, 0, 0},
+ declare(Ch, Q3, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', Q4, 0, 0},
+ declare(Ch, Q4, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', Q5, 0, 0},
+ declare(Ch, Q5, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(500),
+ {ok, Summary} = rpc:call(Server0, rabbit_amqqueue, rebalance, [quorum, ".*", ".*"]),
+
+ %% Q1 and Q2 should not have moved leader, as these are the queues with more
+ %% log entries and we allow up to two queues per node (3 nodes, 5 queues)
+ ?assertMatch({ok, _, {_, Leader1}}, ra:members({ra_name(Q1), Server0})),
+ ?assertMatch({ok, _, {_, Leader2}}, ra:members({ra_name(Q2), Server0})),
+
+ %% Check that we have at most 2 queues per node
+ ?assert(lists:all(fun(NodeData) ->
+ lists:all(fun({_, V}) when is_integer(V) -> V =< 2;
+ (_) -> true end,
+ NodeData)
+ end, Summary)),
+ ok.
+
+subscribe_should_fail_when_global_qos_true(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ qos(Ch, 10, true),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ try subscribe(Ch, QQ, false) of
+ _ -> exit(subscribe_should_not_pass)
+ catch
+ _:_ = Err ->
+ ct:pal("subscribe_should_fail_when_global_qos_true caught an error: ~p", [Err])
+ end,
+ ok.
+
+dead_letter_to_classic_queue(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ CQ = <<"classic-dead_letter_to_classic_queue">>,
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, CQ}
+ ])),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])),
+ test_dead_lettering(true, Config, Ch, Servers, ra_name(QQ), QQ, CQ).
+
+dead_letter_with_memory_limit(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ CQ = <<"classic-dead_letter_with_memory_limit">>,
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 0},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, CQ}
+ ])),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])),
+ test_dead_lettering(true, Config, Ch, Servers, ra_name(QQ), QQ, CQ).
+
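+%% publishes one message to Source, rejects it without requeue and, when dead
+%% lettering is configured (PolicySet), expects it to end up in Destination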
+test_dead_lettering(PolicySet, Config, Ch, Servers, RaName, Source, Destination) ->
+ publish(Ch, Source),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]]),
+ DeliveryTag = consume(Ch, Source, false),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ case PolicySet of
+ true ->
+ wait_for_messages(Config, [[Destination, <<"1">>, <<"1">>, <<"0">>]]),
+ _ = consume(Ch, Destination, true);
+ false ->
+ wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]])
+ end.
+
+dead_letter_policy(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ CQ = <<"classic-dead_letter_policy">>,
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"dlx">>, <<"dead_letter.*">>, <<"queues">>,
+ [{<<"dead-letter-exchange">>, <<"">>},
+ {<<"dead-letter-routing-key">>, CQ}]),
+ RaName = ra_name(QQ),
+ test_dead_lettering(true, Config, Ch, Servers, RaName, QQ, CQ),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"dlx">>),
+ test_dead_lettering(false, Config, Ch, Servers, RaName, QQ, CQ).
+
+invalid_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"ha">>, <<"invalid_policy.*">>, <<"queues">>,
+ [{<<"ha-mode">>, <<"all">>}]),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"ttl">>, <<"invalid_policy.*">>, <<"queues">>,
+ [{<<"message-ttl">>, 5}]),
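+    %% neither ha-mode nor message-ttl applies to quorum queues, so the queue
+    %% should report no effective policy below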
+ Info = rpc:call(Server, rabbit_quorum_queue, infos,
+ [rabbit_misc:r(<<"/">>, queue, QQ)]),
+ ?assertEqual('', proplists:get_value(policy, Info)),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ttl">>).
+
+dead_letter_to_quorum_queue(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ QQ2 = <<"dead_letter_to_quorum_queue-q2">>,
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, QQ2}
+ ])),
+ ?assertEqual({'queue.declare_ok', QQ2, 0, 0},
+ declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ RaName2 = ra_name(QQ2),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages_ready(Servers, RaName2, 0),
+ wait_for_messages_pending_ack(Servers, RaName2, 0),
+ DeliveryTag = consume(Ch, QQ, false),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ wait_for_messages_ready(Servers, RaName2, 0),
+ wait_for_messages_pending_ack(Servers, RaName2, 0),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages_ready(Servers, RaName2, 1),
+ wait_for_messages_pending_ack(Servers, RaName2, 0),
+ _ = consume(Ch, QQ2, false).
+
+dead_letter_from_classic_to_quorum_queue(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ CQ = <<"classic-q-dead_letter_from_classic_to_quorum_queue">>,
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0},
+ declare(Ch, CQ, [{<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, QQ}
+ ])),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ publish(Ch, CQ),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages(Config, [[CQ, <<"1">>, <<"1">>, <<"0">>]]),
+ DeliveryTag = consume(Ch, CQ, false),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages(Config, [[CQ, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages(Config, [[CQ, <<"0">>, <<"0">>, <<"0">>]]),
+ _ = consume(Ch, QQ, false),
+ rabbit_ct_client_helpers:close_channel(Ch).
+
+cleanup_queue_state_on_channel_after_publish(Config) ->
+ %% Declare/delete the queue in one channel and publish on a different one,
+ %% to verify that the cleanup is propagated through channels
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+    %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+    %% are not affected by any other ra cluster that could be added in the future
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch1, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ publish(Ch2, QQ),
+ Res = dirty_query(Servers, RaName, fun rabbit_fifo:query_consumer_count/1),
+    ct:pal("Res ~p", [Res]),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages_ready(Servers, RaName, 1),
+ [NCh1, NCh2] = rpc:call(Server, rabbit_channel, list, []),
+ %% Check the channel state contains the state for the quorum queue on
+ %% channel 1 and 2
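+    %% (assumes rabbit_channel:list/0 returns the channels in the order they
+    %% were opened; only the publishing channel should hold queue state)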
+ wait_for_cleanup(Server, NCh1, 0),
+ wait_for_cleanup(Server, NCh2, 1),
+ %% then delete the queue and wait for the process to terminate
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch1, #'queue.delete'{queue = QQ})),
+ wait_until(fun() ->
+ Children == length(rpc:call(Server, supervisor, which_children,
+ [ra_server_sup_sup]))
+ end),
+ %% Check that all queue states have been cleaned
+ wait_for_cleanup(Server, NCh2, 0),
+ wait_for_cleanup(Server, NCh1, 0).
+
+cleanup_queue_state_on_channel_after_subscribe(Config) ->
+ %% Declare/delete the queue and publish in one channel, while consuming on a
+ %% different one to verify that the cleanup is propagated through channels
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+    %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+    %% are not affected by any other ra cluster that could be added in the future
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch1, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ publish(Ch1, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ subscribe(Ch2, QQ, false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ amqp_channel:cast(Ch2, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = true}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0)
+ end,
+ [NCh1, NCh2] = rpc:call(Server, rabbit_channel, list, []),
+ %% Check the channel state contains the state for the quorum queue on channel 1 and 2
+ wait_for_cleanup(Server, NCh1, 1),
+ wait_for_cleanup(Server, NCh2, 1),
+ ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch1, #'queue.delete'{queue = QQ})),
+ wait_until(fun() ->
+ Children == length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))
+ end),
+ %% Check that all queue states have been cleaned
+ wait_for_cleanup(Server, NCh1, 0),
+ wait_for_cleanup(Server, NCh2, 0).
+
+recover_from_single_failure(Config) ->
+ [Server, Server1, Server2] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+ RaName = ra_name(QQ),
+
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ wait_for_messages_ready([Server, Server1], RaName, 3),
+ wait_for_messages_pending_ack([Server, Server1], RaName, 0),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ wait_for_messages_ready(Servers, RaName, 3),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+recover_from_multiple_failures(Config) ->
+ [Server, Server1, Server2] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ RaName = ra_name(QQ),
+
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+
+ wait_for_messages_ready([Server], RaName, 3),
+ wait_for_messages_pending_ack([Server], RaName, 0),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+
+ %% there is an assumption here that the messages were not lost and were
+ %% recovered when a quorum was restored. Not the best test perhaps.
+ wait_for_messages_ready(Servers, RaName, 6),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+publishing_to_unavailable_queue(Config) ->
+    %% publishing to an unavailable queue that still has a reachable member should result
+ %% in the initial enqueuer session timing out and the message being nacked
+ [Server, Server1, Server2] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ TCh = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(TCh, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+
+ ct:pal("opening channel to ~w", [Server]),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish_many(Ch, QQ, 1),
+ %% this should result in a nack
+ ok = receive
+ #'basic.ack'{} -> fail;
+ #'basic.nack'{} -> ok
+ after 90000 ->
+ exit(confirm_timeout)
+ end,
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
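+    %% with Server1 back up, a majority (2 of 3) is available again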
+ timer:sleep(2000),
+ publish_many(Ch, QQ, 1),
+ %% this should now be acked
+ ok = receive
+ #'basic.ack'{} -> ok;
+ #'basic.nack'{} -> fail
+ after 90000 ->
+ exit(confirm_timeout)
+ end,
+    %% check we get at least one ack
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ ok.
+
+leadership_takeover(Config) ->
+    %% Kill nodes in succession, forcing leadership takeovers, and verify that all
+    %% messages in the queue are retained.
+ [Server, Server1, Server2] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ RaName = ra_name(QQ),
+
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+
+ wait_for_messages_ready([Server], RaName, 3),
+ wait_for_messages_pending_ack([Server], RaName, 0),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+
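+    %% rotate nodes up and down to force repeated leadership changes; the
+    %% three published messages should survive every takeover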
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server),
+
+ wait_for_messages_ready([Server2, Server], RaName, 3),
+ wait_for_messages_pending_ack([Server2, Server], RaName, 0),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ wait_for_messages_ready(Servers, RaName, 3),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+metrics_cleanup_on_leadership_takeover(Config) ->
+ case is_mixed_versions() of
+ true ->
+            {skip, "metrics_cleanup_on_leadership_takeover test isn't mixed version compatible"};
+ false ->
+ metrics_cleanup_on_leadership_takeover0(Config)
+ end.
+
+metrics_cleanup_on_leadership_takeover0(Config) ->
+ %% Queue core metrics should be deleted from a node once the leadership is transferred
+ %% to another follower
+ [Server, _, _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+
+ wait_for_messages_ready([Server], RaName, 3),
+ wait_for_messages_pending_ack([Server], RaName, 0),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ QRes = rabbit_misc:r(<<"/">>, queue, QQ),
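+    %% queue_coarse_metrics rows are expected to be
+    %% {QName, MessagesReady, MessagesUnacked, MessagesTotal, Reductions}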
+ wait_until(
+ fun() ->
+ case rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) of
+ [{QRes, 3, 0, 3, _}] -> true;
+ _ -> false
+ end
+ end),
+ force_leader_change(Servers, QQ),
+ wait_until(fun () ->
+ [] =:= rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) andalso
+ [] =:= rpc:call(Leader, ets, lookup, [queue_metrics, QRes])
+ end),
+ ok.
+
+metrics_cleanup_on_leader_crash(Config) ->
+    %% Queue core metrics should be deleted from a node once it loses leadership,
+    %% e.g. after the leader process crashes and another member takes over
+ [Server | _] = Servers =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+
+ wait_for_messages_ready([Server], RaName, 3),
+ wait_for_messages_pending_ack([Server], RaName, 0),
+ {ok, _, {Name, Leader}} = ra:members({RaName, Server}),
+ QRes = rabbit_misc:r(<<"/">>, queue, QQ),
+ wait_until(
+ fun() ->
+ case rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) of
+ [{QRes, 3, 0, 3, _}] -> true;
+ _ -> false
+ end
+ end),
+ Pid = rpc:call(Leader, erlang, whereis, [Name]),
+ rpc:call(Leader, erlang, exit, [Pid, kill]),
+ [Other | _] = lists:delete(Leader, Servers),
+ catch ra:trigger_election(Other),
+ %% kill it again just in case it came straight back up again
+ catch rpc:call(Leader, erlang, exit, [Pid, kill]),
+
+ %% this isn't a reliable test as the leader can be restarted so quickly
+ %% after a crash it is elected leader of the next term as well.
+ wait_until(
+ fun() ->
+ [] == rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes])
+ end),
+ ok.
+
+
+delete_declare(Config) ->
+ case is_mixed_versions() of
+ true ->
+ {skip, "delete_declare isn't mixed version reliable"};
+ false ->
+ delete_declare0(Config)
+ end.
+
+delete_declare0(Config) ->
+ %% Delete cluster in ra is asynchronous, we have to ensure that we handle that in rmq
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 3),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = QQ})),
+ %% the actual data deletions happen after the call has returned as a quorum
+ %% queue leader waits for all nodes to confirm they replicated the poison
+ %% pill before terminating itself.
+ case is_mixed_versions() of
+ true ->
+            %% when in mixed versions the QQ may not be able to apply the poison
+            %% pill on all nodes, so we need to wait longer for the forced delete
+            %% to happen
+ timer:sleep(10000);
+ false ->
+ timer:sleep(1000)
+ end,
+
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+    %% Ensure this is a new queue and that it's empty
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+sync_queue(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ {error, _, _} =
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QQ]),
+ ok.
+
+cancel_sync_queue(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ {error, _, _} =
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"cancel_sync_queue">>, QQ]),
+ ok.
+
+declare_during_node_down(Config) ->
+ [Server, DownServer, _] = Servers = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+
+ stop_node(Config, DownServer),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ timer:sleep(2000),
+ rabbit_ct_broker_helpers:start_node(Config, DownServer),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ ok.
+
+simple_confirm_availability_on_leader_change(Config) ->
+ [Node1, Node2, _Node3] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+    %% declare a queue on node2 - this _should_ place the leader on node 2
+ DCh = rabbit_ct_client_helpers:open_channel(Config, Node2),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(DCh, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ erlang:process_flag(trap_exit, true),
+ %% open a channel to another node
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node1),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ ok = publish_confirm(Ch, QQ),
+
+ %% stop the node hosting the leader
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node2),
+    %% this should not fail as the channel should detect the new leader and
+    %% resend to it
+ ok = publish_confirm(Ch, QQ),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Node2),
+ ok.
+
+confirm_availability_on_leader_change(Config) ->
+ [Node1, Node2, _Node3] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+    %% declare a queue on node2 - this _should_ place the leader on node 2
+ DCh = rabbit_ct_client_helpers:open_channel(Config, Node2),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(DCh, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ erlang:process_flag(trap_exit, true),
+ Pid = spawn_link(fun () ->
+ %% open a channel to another node
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node1),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ ConfirmLoop = fun Loop() ->
+ ok = publish_confirm(Ch, QQ),
+ receive {done, P} ->
+ P ! done,
+ ok
+ after 0 -> Loop() end
+ end,
+ ConfirmLoop()
+ end),
+
+ timer:sleep(500),
+ %% stop the node hosting the leader
+ stop_node(Config, Node2),
+    %% this should not fail as the channel should detect the new leader and
+    %% resend to it
+ timer:sleep(500),
+ Pid ! {done, self()},
+ receive
+ done -> ok;
+ {'EXIT', Pid, Err} ->
+ exit(Err)
+ after 5500 ->
+ flush(100),
+ exit(bah)
+ end,
+ ok = rabbit_ct_broker_helpers:start_node(Config, Node2),
+ ok.
+
+flush(T) ->
+ receive X ->
+ ct:pal("flushed ~w", [X]),
+ flush(T)
+ after T ->
+ ok
+ end.
+
+
+add_member_not_running(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ ct:pal("add_member_not_running config ~p", [Config]),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({error, node_not_running},
+ rpc:call(Server, rabbit_quorum_queue, add_member,
+ [<<"/">>, QQ, 'rabbit@burrow', 5000])).
+
+add_member_classic(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ CQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])),
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server, rabbit_quorum_queue, add_member,
+ [<<"/">>, CQ, Server, 5000])).
+
+add_member_already_a_member(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ %% idempotent by design
+ ?assertEqual(ok,
+ rpc:call(Server, rabbit_quorum_queue, add_member,
+ [<<"/">>, QQ, Server, 5000])).
+
+add_member_not_found(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({error, not_found},
+ rpc:call(Server, rabbit_quorum_queue, add_member,
+ [<<"/">>, QQ, Server, 5000])).
+
+add_member(Config) ->
+ [Server0, Server1] = Servers0 =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({error, node_not_running},
+ rpc:call(Server0, rabbit_quorum_queue, add_member,
+ [<<"/">>, QQ, Server1, 5000])),
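+    %% Server1 is not clustered with Server0 yet, hence node_not_running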
+ ok = rabbit_control_helper:command(stop_app, Server1),
+ ok = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []),
+ rabbit_control_helper:command(start_app, Server1),
+ ?assertEqual(ok, rpc:call(Server0, rabbit_quorum_queue, add_member,
+ [<<"/">>, QQ, Server1, 5000])),
+ Info = rpc:call(Server0, rabbit_quorum_queue, infos,
+ [rabbit_misc:r(<<"/">>, queue, QQ)]),
+ Servers = lists:sort(Servers0),
+ ?assertEqual(Servers, lists:sort(proplists:get_value(online, Info, []))).
+
+delete_member_not_running(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+    %% it should be possible to delete members that are not online (e.g. decommissioned)
+ ?assertEqual(ok,
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, 'rabbit@burrow'])).
+
+delete_member_classic(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ CQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])),
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, CQ, Server])).
+
+delete_member_queue_not_found(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({error, not_found},
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, Server])).
+
+delete_member(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(100),
+ ?assertEqual(ok,
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, Server])).
+
+delete_member_not_a_member(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(100),
+ ?assertEqual(ok,
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, Server])),
+ %% idempotent by design
+ ?assertEqual(ok,
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, Server])).
+
+delete_member_during_node_down(Config) ->
+ [Server, DownServer, Remove] = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+
+ stop_node(Config, DownServer),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(200),
+ ?assertEqual(ok, rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, Remove])),
+
+ rabbit_ct_broker_helpers:start_node(Config, DownServer),
+ ?assertEqual(ok, rpc:call(Server, rabbit_quorum_queue, repair_amqqueue_nodes,
+ [<<"/">>, QQ])),
+ ok.
+
+%% These tests check if node removal would cause any queues to lose (or not lose)
+%% their quorum. See rabbitmq/rabbitmq-cli#389 for background.
+
+node_removal_is_quorum_critical(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QName = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ declare(Ch, QName, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(100),
+ [begin
+ Qs = rpc:call(S, rabbit_quorum_queue, list_with_minimum_quorum, []),
+ ?assertEqual([QName], queue_names(Qs))
+ end || S <- Servers].
+
+node_removal_is_not_quorum_critical(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QName = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ declare(Ch, QName, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(100),
+ Qs = rpc:call(Server, rabbit_quorum_queue, list_with_minimum_quorum, []),
+ ?assertEqual([], Qs).
+
+
+file_handle_reservations(Config) ->
+ case is_mixed_versions() of
+ true ->
+            {skip, "file_handle_reservations test isn't mixed version compatible"};
+ false ->
+ file_handle_reservations0(Config)
+ end.
+
+file_handle_reservations0(Config) ->
+ Servers = [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server1}),
+ [Follower1, Follower2] = Servers -- [Leader],
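+    %% the leader is expected to reserve more file handles (5) than the
+    %% followers (2), and the reservations should follow leadership changes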
+ ?assertEqual([{files_reserved, 5}],
+ rpc:call(Leader, file_handle_cache, info, [[files_reserved]])),
+ ?assertEqual([{files_reserved, 2}],
+ rpc:call(Follower1, file_handle_cache, info, [[files_reserved]])),
+ ?assertEqual([{files_reserved, 2}],
+ rpc:call(Follower2, file_handle_cache, info, [[files_reserved]])),
+ force_leader_change(Servers, QQ),
+ {ok, _, {_, Leader0}} = ra:members({RaName, Server1}),
+ [Follower01, Follower02] = Servers -- [Leader0],
+ ?assertEqual([{files_reserved, 5}],
+ rpc:call(Leader0, file_handle_cache, info, [[files_reserved]])),
+ ?assertEqual([{files_reserved, 2}],
+ rpc:call(Follower01, file_handle_cache, info, [[files_reserved]])),
+ ?assertEqual([{files_reserved, 2}],
+ rpc:call(Follower02, file_handle_cache, info, [[files_reserved]])).
+
+file_handle_reservations_above_limit(Config) ->
+ [S1, S2, S3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, S1),
+ QQ = ?config(queue_name, Config),
+ QQ2 = ?config(alt_queue_name, Config),
+
+ Limit = rpc:call(S1, file_handle_cache, get_limit, []),
+
+ ok = rpc:call(S1, file_handle_cache, set_limit, [3]),
+ ok = rpc:call(S2, file_handle_cache, set_limit, [3]),
+ ok = rpc:call(S3, file_handle_cache, set_limit, [3]),
+
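+    %% with only 3 handles per node, declaring two quorum queues pushes the
+    %% reservations above the limit; the declarations should still succeed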
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', QQ2, 0, 0},
+ declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rpc:call(S1, file_handle_cache, set_limit, [Limit]),
+ ok = rpc:call(S2, file_handle_cache, set_limit, [Limit]),
+ ok = rpc:call(S3, file_handle_cache, set_limit, [Limit]).
+
+cleanup_data_dir(Config) ->
+    %% This test is slow, but also checks that we properly handle errors when
+    %% trying to delete a queue in minority. A case clause there had gone
+ %% previously unnoticed.
+
+ [Server1, Server2] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(100),
+
+ UId1 = proplists:get_value(ra_name(QQ), rpc:call(Server1, ra_directory, list_registered, [])),
+ UId2 = proplists:get_value(ra_name(QQ), rpc:call(Server2, ra_directory, list_registered, [])),
+ DataDir1 = rpc:call(Server1, ra_env, server_data_dir, [UId1]),
+ DataDir2 = rpc:call(Server2, ra_env, server_data_dir, [UId2]),
+ ?assert(filelib:is_dir(DataDir1)),
+ ?assert(filelib:is_dir(DataDir2)),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = QQ})),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+ %% data dir 1 should be force deleted at this point
+ ?assert(not filelib:is_dir(DataDir1)),
+ ?assert(filelib:is_dir(DataDir2)),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ timer:sleep(2000),
+
+ ?assertEqual(ok,
+ rpc:call(Server2, rabbit_quorum_queue, cleanup_data_dir, [])),
+ ?assert(not filelib:is_dir(DataDir2)),
+ ok.
+
+reconnect_consumer_and_publish(Config) ->
+ [Server | _] = Servers =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ [F1, F2] = lists:delete(Leader, Servers),
+ ChF = rabbit_ct_client_helpers:open_channel(Config, F1),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ subscribe(ChF, QQ, false),
+ receive
+ {#'basic.deliver'{redelivered = false}, _} ->
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1)
+ end,
+ Up = [Leader, F2],
+ rabbit_ct_broker_helpers:block_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:block_traffic_between(F1, F2),
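+    %% F1, the node hosting the consuming channel, is now partitioned from
+    %% the other two nodes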
+ wait_for_messages_ready(Up, RaName, 1),
+ wait_for_messages_pending_ack(Up, RaName, 0),
+ wait_for_messages_ready([F1], RaName, 0),
+ wait_for_messages_pending_ack([F1], RaName, 1),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, F2),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 2),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ amqp_channel:cast(ChF, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1)
+ end,
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag2,
+ redelivered = true}, _} ->
+ amqp_channel:cast(ChF, #'basic.ack'{delivery_tag = DeliveryTag2,
+ multiple = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0)
+ end.
+
+reconnect_consumer_and_wait(Config) ->
+ [Server | _] = Servers =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ [F1, F2] = lists:delete(Leader, Servers),
+ ChF = rabbit_ct_client_helpers:open_channel(Config, F1),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ subscribe(ChF, QQ, false),
+ receive
+ {#'basic.deliver'{redelivered = false}, _} ->
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1)
+ end,
+ Up = [Leader, F2],
+ rabbit_ct_broker_helpers:block_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:block_traffic_between(F1, F2),
+ wait_for_messages_ready(Up, RaName, 1),
+ wait_for_messages_pending_ack(Up, RaName, 0),
+ wait_for_messages_ready([F1], RaName, 0),
+ wait_for_messages_pending_ack([F1], RaName, 1),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, F2),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = true}, _} ->
+ amqp_channel:cast(ChF, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0)
+ end.
+
+reconnect_consumer_and_wait_channel_down(Config) ->
+ [Server | _] = Servers =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ [F1, F2] = lists:delete(Leader, Servers),
+ ChF = rabbit_ct_client_helpers:open_channel(Config, F1),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ subscribe(ChF, QQ, false),
+ receive
+ {#'basic.deliver'{redelivered = false}, _} ->
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1)
+ end,
+ Up = [Leader, F2],
+ rabbit_ct_broker_helpers:block_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:block_traffic_between(F1, F2),
+ wait_for_messages_ready(Up, RaName, 1),
+ wait_for_messages_pending_ack(Up, RaName, 0),
+ wait_for_messages_ready([F1], RaName, 0),
+ wait_for_messages_pending_ack([F1], RaName, 1),
+ rabbit_ct_client_helpers:close_channel(ChF),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, F2),
+ %% Let's give it a few seconds to ensure it doesn't attempt to
+ %% deliver to the down channel - it shouldn't be monitored
+ %% at this time!
+ timer:sleep(5000),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+delete_immediately_by_resource(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+
+    %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+    %% are not affected by any other ra cluster that could be added in the future
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ Cmd2 = ["eval", "rabbit_amqqueue:delete_immediately_by_resource([rabbit_misc:r(<<\"/\">>, queue, <<\"" ++ binary_to_list(QQ) ++ "\">>)])."],
+ ?assertEqual({ok, "ok\n"}, rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd2)),
+
+ %% Check that the application and process are down
+ wait_until(fun() ->
+ Children == length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))
+ end),
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))).
+
+subscribe_redelivery_count(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ subscribe(Ch, QQ, false),
+
+ DCHeader = <<"x-delivery-count">>,
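+    %% the x-delivery-count header is expected to be absent on the first
+    %% delivery and to grow by one on every requeue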
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{props = #'P_basic'{headers = H0}}} ->
+ ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true})
+ after 5000 ->
+ exit(basic_deliver_timeout)
+ end,
+
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H1}}} ->
+ ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1,
+ multiple = false,
+ requeue = true})
+ after 5000 ->
+ exit(basic_deliver_timeout_2)
+ end,
+
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag2,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H2}}} ->
+ ?assertMatch({DCHeader, _, 2}, rabbit_basic:header(DCHeader, H2)),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag2,
+ multiple = false}),
+ ct:pal("wait_for_messages_ready", []),
+ wait_for_messages_ready(Servers, RaName, 0),
+ ct:pal("wait_for_messages_pending_ack", []),
+ wait_for_messages_pending_ack(Servers, RaName, 0)
+ after 5000 ->
+ flush(500),
+ exit(basic_deliver_timeout_3)
+ end.
+
+subscribe_redelivery_limit(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-delivery-limit">>, long, 1}])),
+
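+    %% with x-delivery-limit=1 the message may be redelivered once; the next
+    %% requeue should drop it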
+ publish(Ch, QQ),
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QQ, false),
+
+ DCHeader = <<"x-delivery-count">>,
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{props = #'P_basic'{headers = H0}}} ->
+ ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H1}}} ->
+ ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]),
+ receive
+ {#'basic.deliver'{redelivered = true}, #amqp_msg{}} ->
+ throw(unexpected_redelivery)
+ after 2000 ->
+ ok
+ end.
+
+subscribe_redelivery_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"delivery-limit">>, <<".*">>, <<"queues">>,
+ [{<<"delivery-limit">>, 1}]),
+
+ publish(Ch, QQ),
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QQ, false),
+
+ DCHeader = <<"x-delivery-count">>,
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{props = #'P_basic'{headers = H0}}} ->
+ ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H1}}} ->
+ ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]),
+ receive
+ {#'basic.deliver'{redelivered = true}, #amqp_msg{}} ->
+ throw(unexpected_redelivery)
+ after 2000 ->
+ ok
+ end,
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"delivery-limit">>).
+
+subscribe_redelivery_limit_with_dead_letter(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ DLX = <<"subcribe_redelivery_limit_with_dead_letter_dlx">>,
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-delivery-limit">>, long, 1},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, DLX}
+ ])),
+ ?assertEqual({'queue.declare_ok', DLX, 0, 0},
+ declare(Ch, DLX, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ publish(Ch, QQ),
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QQ, false),
+
+ DCHeader = <<"x-delivery-count">>,
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{props = #'P_basic'{headers = H0}}} ->
+ ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H1}}} ->
+ ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]),
+ wait_for_messages(Config, [[DLX, <<"1">>, <<"1">>, <<"0">>]]).
+
+consume_redelivery_count(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+
+ DCHeader = <<"x-delivery-count">>,
+
+ {#'basic.get_ok'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{props = #'P_basic'{headers = H0}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false}),
+ ?assertMatch({DCHeader, _, 0}, rabbit_basic:header(DCHeader, H0)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true}),
+ %% wait for requeuing
+ timer:sleep(500),
+
+ {#'basic.get_ok'{delivery_tag = DeliveryTag1,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H1}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false}),
+ ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1,
+ multiple = false,
+ requeue = true}),
+
+ {#'basic.get_ok'{delivery_tag = DeliveryTag2,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false}),
+ ?assertMatch({DCHeader, _, 2}, rabbit_basic:header(DCHeader, H2)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag2,
+ multiple = false,
+ requeue = true}),
+ ok.
+
+message_bytes_metrics(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ QRes = rabbit_misc:r(<<"/">>, queue, QQ),
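+    %% get_message_bytes/2 (suite helper) is assumed to return
+    %% {TotalBytes, ReadyBytes, UnackedBytes}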
+
+ publish(Ch, QQ),
+
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_until(fun() ->
+ {3, 3, 0} == get_message_bytes(Leader, QRes)
+ end),
+
+ subscribe(Ch, QQ, false),
+
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ wait_until(fun() ->
+ {3, 0, 3} == get_message_bytes(Leader, QRes)
+ end),
+
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_until(fun() ->
+ {0, 0, 0} == get_message_bytes(Leader, QRes)
+ end)
+ end,
+
+ %% Let's publish and then close the consumer channel. Messages must be
+ %% returned to the queue
+ publish(Ch, QQ),
+
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ wait_until(fun() ->
+ {3, 0, 3} == get_message_bytes(Leader, QRes)
+ end),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_until(fun() ->
+ {3, 3, 0} == get_message_bytes(Leader, QRes)
+ end),
+ ok.
+
+memory_alarm_rolls_wal(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ WalDataDir = rpc:call(Server, ra_env, wal_data_dir, []),
+ [Wal0] = filelib:wildcard(WalDataDir ++ "/*.wal"),
+ rabbit_ct_broker_helpers:set_alarm(Config, Server, memory),
+ rabbit_ct_helpers:await_condition(
+ fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =/= [] end
+ ),
+ timer:sleep(1000),
+ [Wal1] = filelib:wildcard(WalDataDir ++ "/*.wal"),
+ ?assert(Wal0 =/= Wal1),
+ %% roll over shouldn't happen if we trigger a new alarm in less than
+ %% min_wal_roll_over_interval
+ rabbit_ct_broker_helpers:set_alarm(Config, Server, memory),
+ rabbit_ct_helpers:await_condition(
+ fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =/= [] end
+ ),
+ timer:sleep(1000),
+ [Wal2] = filelib:wildcard(WalDataDir ++ "/*.wal"),
+ ?assert(Wal1 == Wal2),
+ ok = rpc:call(Server, rabbit_alarm, clear_alarm,
+ [{{resource_limit, memory, Server}, []}]),
+ timer:sleep(1000),
+ ok.
+
+queue_length_limit_drop_head(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-length">>, long, 1}])),
+
+ RaName = ra_name(QQ),
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = <<"msg1">>}),
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = <<"msg2">>}),
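+    %% with x-max-length=1 and the default drop-head overflow, msg1 should be
+    %% dropped and only msg2 retained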
+ wait_for_consensus(QQ, Config),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages_total(Servers, RaName, 1),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg2">>}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})).
+
+queue_length_limit_reject_publish(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ RaName = ra_name(QQ),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-length">>, long, 1},
+ {<<"x-overflow">>, longstr, <<"reject-publish">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ ok = publish_confirm(Ch, QQ),
+ ok = publish_confirm(Ch, QQ),
+    %% give the channel some time to process the async reject_publish notification;
+    %% now that we are over the limit, publishes should start failing
+ wait_for_messages_total(Servers, RaName, 2),
+ fail = publish_confirm(Ch, QQ),
+ %% remove all messages
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = _}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = _}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+ %% publish should be allowed again now
+ ok = publish_confirm(Ch, QQ),
+ ok.
+
+queue_length_in_memory_limit_basic_get(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 1}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Msg1}),
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = <<"msg2">>}),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
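+    %% with x-max-in-memory-length=1 only one message should be held in
+    %% memory, yet both remain retrievable via basic.get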
+
+ ?assertEqual([{1, byte_size(Msg1)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg2">>}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})).
+
+queue_length_in_memory_limit_subscribe(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 1}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
+ ?assertEqual([{1, byte_size(Msg1)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ subscribe(Ch, QQ, false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = false},
+ #amqp_msg{payload = Msg1}} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag1,
+ multiple = false})
+ end,
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag2,
+ redelivered = false},
+ #amqp_msg{payload = Msg2}} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag2,
+ multiple = false})
+ end.
+
+queue_length_in_memory_limit(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 2}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ Msg3 = <<"msg111">>,
+ Msg4 = <<"msg1111">>,
+
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ publish(Ch, QQ, Msg3),
+ wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+ publish(Ch, QQ, Msg4),
+ wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg2) + byte_size(Msg4)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)).
+
+queue_length_in_memory_limit_returns(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 2}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ Msg3 = <<"msg111">>,
+ Msg4 = <<"msg111">>,
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false})),
+
+ {#'basic.get_ok'{delivery_tag = DTag2}, #amqp_msg{payload = Msg2}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false}),
+
+ publish(Ch, QQ, Msg3),
+ publish(Ch, QQ, Msg4),
+
+ %% Ensure that returns are subject to in memory limits too
+ wait_for_messages(Config, [[QQ, <<"4">>, <<"2">>, <<"2">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = true,
+ requeue = true}),
+ wait_for_messages(Config, [[QQ, <<"4">>, <<"4">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg3) + byte_size(Msg4)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)).
+
+queue_length_in_memory_bytes_limit_basic_get(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-bytes">>, long, 6}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Msg1}),
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = <<"msg2">>}),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
+ ?assertEqual([{1, byte_size(Msg1)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg2">>}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})).
+
+queue_length_in_memory_bytes_limit_subscribe(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-bytes">>, long, 6}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
+ ?assertEqual([{1, byte_size(Msg1)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ subscribe(Ch, QQ, false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = false},
+ #amqp_msg{payload = Msg1}} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag1,
+ multiple = false})
+ end,
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag2,
+ redelivered = false},
+ #amqp_msg{payload = Msg2}} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag2,
+ multiple = false})
+ end.
+
+queue_length_in_memory_bytes_limit(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-bytes">>, long, 12}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ Msg3 = <<"msg111">>,
+ Msg4 = <<"msg1111">>,
+
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ publish(Ch, QQ, Msg3),
+ wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+ publish(Ch, QQ, Msg4),
+ wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg2) + byte_size(Msg4)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)).
+
+queue_length_in_memory_purge(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 2}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ Msg3 = <<"msg111">>,
+
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ publish(Ch, QQ, Msg3),
+ wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ {'queue.purge_ok', 3} = amqp_channel:call(Ch, #'queue.purge'{queue = QQ}),
+
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)).
+
+peek(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 2}])),
+
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+
+ QName = rabbit_misc:r(<<"/">>, queue, QQ),
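+    %% peek positions are 1-based and peeking should not consume messages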
+ ?assertMatch({error, no_message_at_pos},
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue,
+ peek, [1, QName])),
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
+ ?assertMatch({ok, [_|_]},
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue,
+ peek, [1, QName])),
+ ?assertMatch({ok, [_|_]},
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue,
+ peek, [2, QName])),
+ ?assertMatch({error, no_message_at_pos},
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue,
+ peek, [3, QName])),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+ ok.
+
+in_memory(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+
+ publish(Ch, QQ, Msg1),
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+ ?assertEqual([{1, byte_size(Msg1)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ subscribe(Ch, QQ, false),
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]),
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ publish(Ch, QQ, Msg2),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"0">>, <<"2">>]]),
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, #amqp_msg{}} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]),
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)).
+
+consumer_metrics(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch1, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ subscribe(Ch1, QQ, false),
+
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ timer:sleep(5000),
+ QNameRes = rabbit_misc:r(<<"/">>, queue, QQ),
+ [{_, PropList, _}] = rpc:call(Leader, ets, lookup, [queue_metrics, QNameRes]),
+ ?assertMatch([{consumers, 1}], lists:filter(fun({Key, _}) ->
+ Key == consumers
+ end, PropList)).
+
+delete_if_empty(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ publish(Ch, QQ),
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+    %% Conditional delete (if_empty) of a quorum queue fails with a 540
+    %% (NOT_IMPLEMENTED) connection error
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ amqp_channel:call(Ch, #'queue.delete'{queue = QQ,
+ if_empty = true})).
+
+delete_if_unused(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ publish(Ch, QQ),
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+    %% Conditional delete (if_unused) of a quorum queue fails with a 540
+    %% (NOT_IMPLEMENTED) connection error
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ amqp_channel:call(Ch, #'queue.delete'{queue = QQ,
+ if_unused = true})).
+
+queue_ttl(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-expires">>, long, 1000}])),
+ timer:sleep(5500),
+ %% check queue no longer exists
+ ?assertExit(
+ {{shutdown,
+ {server_initiated_close,404,
+ <<"NOT_FOUND - no queue 'queue_ttl' in vhost '/'">>}},
+ _},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QQ,
+ passive = true,
+ durable = true,
+ auto_delete = false,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-expires">>, long, 1000}]})),
+ ok.
+
+consumer_priorities(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch, 2, false),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% consumer with default priority
+ Tag1 = <<"ctag1">>,
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = QQ,
+ no_ack = false,
+ consumer_tag = Tag1},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = Tag1} ->
+ ok
+ end,
+ %% consumer with higher priority
+ Tag2 = <<"ctag2">>,
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = QQ,
+ arguments = [{"x-priority", long, 10}],
+ no_ack = false,
+ consumer_tag = Tag2},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = Tag2} ->
+ ok
+ end,
+
+ publish(Ch, QQ),
+ %% Tag2 should receive the message
+ DT1 = receive
+ {#'basic.deliver'{delivery_tag = D1,
+ consumer_tag = Tag2}, _} ->
+ D1
+ after 5000 ->
+ flush(100),
+ ct:fail("basic.deliver timeout")
+ end,
+ publish(Ch, QQ),
+ %% Tag2 should receive the message
+ receive
+ {#'basic.deliver'{delivery_tag = _,
+ consumer_tag = Tag2}, _} ->
+ ok
+ after 5000 ->
+ flush(100),
+ ct:fail("basic.deliver timeout")
+ end,
+
+ publish(Ch, QQ),
+    %% Tag1 should receive the message as Tag2 has reached its qos limit
+ receive
+ {#'basic.deliver'{delivery_tag = _,
+ consumer_tag = Tag1}, _} ->
+ ok
+ after 5000 ->
+ flush(100),
+ ct:fail("basic.deliver timeout")
+ end,
+
+ ok = amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DT1,
+ multiple = false}),
+ publish(Ch, QQ),
+ %% Tag2 should receive the message
+ receive
+ {#'basic.deliver'{delivery_tag = _,
+ consumer_tag = Tag2}, _} ->
+ ok
+ after 5000 ->
+ flush(100),
+ ct:fail("basic.deliver timeout")
+ end,
+
+ ok.
+
+%%----------------------------------------------------------------------------
+
+declare(Ch, Q) ->
+ declare(Ch, Q, []).
+
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ auto_delete = false,
+ arguments = Args}).
+
+assert_queue_type(Server, Q, Expected) ->
+ Actual = get_queue_type(Server, Q),
+ Expected = Actual.
+
+get_queue_type(Server, Q0) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, Q0),
+ {ok, Q1} = rpc:call(Server, rabbit_amqqueue, lookup, [QNameRes]),
+ amqqueue:get_type(Q1).
+
+publish_many(Ch, Queue, Count) ->
+ [publish(Ch, Queue) || _ <- lists:seq(1, Count)].
+
+publish(Ch, Queue) ->
+ publish(Ch, Queue, <<"msg">>).
+
+publish(Ch, Queue, Msg) ->
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Msg}).
+
+consume(Ch, Queue, NoAck) ->
+ {GetOk, _} = Reply = amqp_channel:call(Ch, #'basic.get'{queue = Queue,
+ no_ack = NoAck}),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}}, Reply),
+ GetOk#'basic.get_ok'.delivery_tag.
+
+consume_empty(Ch, Queue, NoAck) ->
+ ?assertMatch(#'basic.get_empty'{},
+ amqp_channel:call(Ch, #'basic.get'{queue = Queue,
+ no_ack = NoAck})).
+
+subscribe(Ch, Queue, NoAck) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+ no_ack = NoAck,
+ consumer_tag = <<"ctag">>},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+qos(Ch, Prefetch, Global) ->
+ ?assertMatch(#'basic.qos_ok'{},
+ amqp_channel:call(Ch, #'basic.qos'{global = Global,
+ prefetch_count = Prefetch})).
+
+receive_basic_deliver(Redelivered) ->
+ receive
+ {#'basic.deliver'{redelivered = R}, _} when R == Redelivered ->
+ ok
+ end.
+
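+%% Poll the number of queue states tracked by Channel on Server until it
+%% equals Number, retrying every 500ms (60 attempts by default).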
+wait_for_cleanup(Server, Channel, Number) ->
+ wait_for_cleanup(Server, Channel, Number, 60).
+
+wait_for_cleanup(Server, Channel, Number, 0) ->
+ ?assertEqual(length(rpc:call(Server, rabbit_channel, list_queue_states, [Channel])),
+ Number);
+wait_for_cleanup(Server, Channel, Number, N) ->
+ case length(rpc:call(Server, rabbit_channel, list_queue_states, [Channel])) of
+ Length when Number == Length ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ wait_for_cleanup(Server, Channel, Number, N - 1)
+ end.
+
+wait_until(Condition) ->
+ wait_until(Condition, 60).
+
+wait_until(Condition, 0) ->
+ ?assertEqual(true, Condition());
+wait_until(Condition, N) ->
+ case Condition() of
+ true ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ wait_until(Condition, N - 1)
+ end.
+
+
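+%% Trigger elections on a follower until a node other than the current
+%% leader wins, i.e. until the Ra leader has actually changed.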
+force_leader_change([Server | _] = Servers, Q) ->
+ RaName = ra_name(Q),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ [F1, _] = Servers -- [Leader],
+ ok = rpc:call(F1, ra, trigger_election, [{RaName, F1}]),
+ case ra:members({RaName, Leader}) of
+ {ok, _, {_, Leader}} ->
+ %% Leader has been re-elected
+ force_leader_change(Servers, Q);
+ {ok, _, _} ->
+ %% Leader has changed
+ ok
+ end.
+
+delete_queues() ->
+ [rabbit_amqqueue:delete(Q, false, false, <<"dummy">>)
+ || Q <- rabbit_amqqueue:list()].
+
+stop_node(Config, Server) ->
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, Server, ["stop"]).
+
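+%% Read the message_bytes counters for QRes from the queue_metrics table on
+%% the leader node; returns [] if no entry is present yet.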
+get_message_bytes(Leader, QRes) ->
+ case rpc:call(Leader, ets, lookup, [queue_metrics, QRes]) of
+ [{QRes, Props, _}] ->
+ {proplists:get_value(message_bytes, Props),
+ proplists:get_value(message_bytes_ready, Props),
+ proplists:get_value(message_bytes_unacknowledged, Props)};
+ _ ->
+ []
+ end.
+
+wait_for_consensus(Name, Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ RaName = ra_name(Name),
+ {ok, _, _} = ra:members({RaName, Server}).
+
+queue_names(Records) ->
+ [begin
+ #resource{name = Name} = amqqueue:get_name(Q),
+ Name
+ end || Q <- Records].
diff --git a/deps/rabbit/test/quorum_queue_utils.erl b/deps/rabbit/test/quorum_queue_utils.erl
new file mode 100644
index 0000000000..224abeeeeb
--- /dev/null
+++ b/deps/rabbit/test/quorum_queue_utils.erl
@@ -0,0 +1,112 @@
+-module(quorum_queue_utils).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-export([
+ wait_for_messages_ready/3,
+ wait_for_messages_pending_ack/3,
+ wait_for_messages_total/3,
+ wait_for_messages/2,
+ dirty_query/3,
+ ra_name/1,
+ fifo_machines_use_same_version/1,
+ fifo_machines_use_same_version/2,
+ is_mixed_versions/0
+ ]).
+
+wait_for_messages_ready(Servers, QName, Ready) ->
+ wait_for_messages(Servers, QName, Ready,
+ fun rabbit_fifo:query_messages_ready/1, 60).
+
+wait_for_messages_pending_ack(Servers, QName, Ready) ->
+ wait_for_messages(Servers, QName, Ready,
+ fun rabbit_fifo:query_messages_checked_out/1, 60).
+
+wait_for_messages_total(Servers, QName, Total) ->
+ wait_for_messages(Servers, QName, Total,
+ fun rabbit_fifo:query_messages_total/1, 60).
+
+wait_for_messages(Servers, QName, Number, Fun, 0) ->
+ Msgs = dirty_query(Servers, QName, Fun),
+ ?assertEqual([Number || _ <- lists:seq(1, length(Servers))], Msgs);
+wait_for_messages(Servers, QName, Number, Fun, N) ->
+ Msgs = dirty_query(Servers, QName, Fun),
+ ct:pal("Got messages ~p ~p", [QName, Msgs]),
+ %% hack to allow the check to succeed in mixed versions clusters if at
+ %% least one node matches the criteria rather than all nodes for
+ F = case is_mixed_versions() of
+ true ->
+ any;
+ false ->
+ all
+ end,
+ case lists:F(fun(C) when is_integer(C) ->
+ C == Number;
+ (_) ->
+ false
+ end, Msgs) of
+ true ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ wait_for_messages(Servers, QName, Number, Fun, N - 1)
+ end.
+
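+%% Wait until `rabbitmqctl list_queues` reports exactly the expected
+%% [name, messages, messages_ready, messages_unacknowledged] rows.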
+wait_for_messages(Config, Stats) ->
+ wait_for_messages(Config, lists:sort(Stats), 60).
+
+wait_for_messages(Config, Stats, 0) ->
+ ?assertEqual(Stats,
+ lists:sort(
+ filter_queues(Stats,
+ rabbit_ct_broker_helpers:rabbitmqctl_list(
+ Config, 0, ["list_queues", "name", "messages", "messages_ready",
+ "messages_unacknowledged"]))));
+wait_for_messages(Config, Stats, N) ->
+ case lists:sort(
+ filter_queues(Stats,
+ rabbit_ct_broker_helpers:rabbitmqctl_list(
+ Config, 0, ["list_queues", "name", "messages", "messages_ready",
+ "messages_unacknowledged"]))) of
+ Stats0 when Stats0 == Stats ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ wait_for_messages(Config, Stats, N - 1)
+ end.
+
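+%% Run a local Ra query with Fun on every server and collect the per-node
+%% results; nodes that fail to answer yield 'undefined'.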
+dirty_query(Servers, QName, Fun) ->
+ lists:map(
+ fun(N) ->
+ case rpc:call(N, ra, local_query, [{QName, N}, Fun]) of
+ {ok, {_, Msgs}, _} ->
+ Msgs;
+ _E ->
+ undefined
+ end
+ end, Servers).
+
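+%% Ra cluster name of a quorum queue in the default vhost: "%2F" is the
+%% percent-encoded "/" vhost, joined to the queue name by an underscore.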
+ra_name(Q) ->
+ binary_to_atom(<<"%2F_", Q/binary>>, utf8).
+
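+%% Keep only the rows of Got whose queue name appears in Expected.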
+filter_queues(Expected, Got) ->
+ Keys = [K || [K, _, _, _] <- Expected],
+ lists:filter(fun([K, _, _, _]) ->
+ lists:member(K, Keys)
+ end, Got).
+
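+%% True if every node reports the same rabbit_fifo machine version
+%% (relevant when running against a mixed-versions cluster).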
+fifo_machines_use_same_version(Config) ->
+ Nodenames = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ fifo_machines_use_same_version(Config, Nodenames).
+
+fifo_machines_use_same_version(Config, Nodenames)
+ when length(Nodenames) >= 1 ->
+ [MachineAVersion | OtherMachinesVersions] =
+ [(catch rabbit_ct_broker_helpers:rpc(
+ Config, Nodename,
+ rabbit_fifo, version, []))
+ || Nodename <- Nodenames],
+ lists:all(fun(V) -> V =:= MachineAVersion end, OtherMachinesVersions).
+
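+%% Mixed-versions runs are signalled by the SECONDARY_UMBRELLA environment
+%% variable being set.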
+is_mixed_versions() ->
+ not (false == os:getenv("SECONDARY_UMBRELLA")).
diff --git a/deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl b/deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl
new file mode 100644
index 0000000000..e721f5e0dd
--- /dev/null
+++ b/deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl
@@ -0,0 +1,46 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% A mock authn/authz that records information during calls. For testing purposes only.
+
+-module(rabbit_auth_backend_context_propagation_mock).
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4, check_topic_access/4,
+ state_can_expire/0,
+ get/1, init/0]).
+
+init() ->
+ ets:new(?MODULE, [set, public, named_table]).
+
+user_login_authentication(_, AuthProps) ->
+ ets:insert(?MODULE, {authentication, AuthProps}),
+ {ok, #auth_user{username = <<"dummy">>,
+ tags = [],
+ impl = none}}.
+
+user_login_authorization(_, _) ->
+ {ok, does_not_matter}.
+
+check_vhost_access(#auth_user{}, _VHostPath, AuthzData) ->
+ ets:insert(?MODULE, {vhost_access, AuthzData}),
+ true.
+check_resource_access(#auth_user{}, #resource{}, _Permission, AuthzContext) ->
+ ets:insert(?MODULE, {resource_access, AuthzContext}),
+ true.
+check_topic_access(#auth_user{}, #resource{}, _Permission, TopicContext) ->
+ ets:insert(?MODULE, {topic_access, TopicContext}),
+ true.
+
+state_can_expire() -> false.
+
+get(K) ->
+ ets:lookup(?MODULE, K).
diff --git a/deps/rabbit/test/rabbit_confirms_SUITE.erl b/deps/rabbit/test/rabbit_confirms_SUITE.erl
new file mode 100644
index 0000000000..331c3ca7c3
--- /dev/null
+++ b/deps/rabbit/test/rabbit_confirms_SUITE.erl
@@ -0,0 +1,154 @@
+-module(rabbit_confirms_SUITE).
+
+-compile(export_all).
+
+-export([
+ ]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+all_tests() ->
+ [
+ confirm,
+ reject,
+ remove_queue
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+confirm(_Config) ->
+ XName = rabbit_misc:r(<<"/">>, exchange, <<"X">>),
+ QName = rabbit_misc:r(<<"/">>, queue, <<"Q">>),
+ QName2 = rabbit_misc:r(<<"/">>, queue, <<"Q2">>),
+ U0 = rabbit_confirms:init(),
+ ?assertEqual(0, rabbit_confirms:size(U0)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U0)),
+ ?assertEqual(true, rabbit_confirms:is_empty(U0)),
+
+ U1 = rabbit_confirms:insert(1, [QName], XName, U0),
+ ?assertEqual(1, rabbit_confirms:size(U1)),
+ ?assertEqual(1, rabbit_confirms:smallest(U1)),
+ ?assertEqual(false, rabbit_confirms:is_empty(U1)),
+
+ {[{1, XName}], U2} = rabbit_confirms:confirm([1], QName, U1),
+ ?assertEqual(0, rabbit_confirms:size(U2)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U2)),
+ ?assertEqual(true, rabbit_confirms:is_empty(U2)),
+
+ U3 = rabbit_confirms:insert(2, [QName], XName, U1),
+ ?assertEqual(2, rabbit_confirms:size(U3)),
+ ?assertEqual(1, rabbit_confirms:smallest(U3)),
+ ?assertEqual(false, rabbit_confirms:is_empty(U3)),
+
+ {[{1, XName}], U4} = rabbit_confirms:confirm([1], QName, U3),
+ ?assertEqual(1, rabbit_confirms:size(U4)),
+ ?assertEqual(2, rabbit_confirms:smallest(U4)),
+ ?assertEqual(false, rabbit_confirms:is_empty(U4)),
+
+ U5 = rabbit_confirms:insert(2, [QName, QName2], XName, U1),
+ ?assertEqual(2, rabbit_confirms:size(U5)),
+ ?assertEqual(1, rabbit_confirms:smallest(U5)),
+ ?assertEqual(false, rabbit_confirms:is_empty(U5)),
+
+ {[{1, XName}], U6} = rabbit_confirms:confirm([1, 2], QName, U5),
+ ?assertEqual(2, rabbit_confirms:smallest(U6)),
+
+ {[{2, XName}], U7} = rabbit_confirms:confirm([2], QName2, U6),
+ ?assertEqual(0, rabbit_confirms:size(U7)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U7)),
+
+
+ U8 = rabbit_confirms:insert(2, [QName], XName, U1),
+ {[{1, XName}, {2, XName}], _U9} = rabbit_confirms:confirm([1, 2], QName, U8),
+ ok.
+
+
+reject(_Config) ->
+ XName = rabbit_misc:r(<<"/">>, exchange, <<"X">>),
+ QName = rabbit_misc:r(<<"/">>, queue, <<"Q">>),
+ QName2 = rabbit_misc:r(<<"/">>, queue, <<"Q2">>),
+ U0 = rabbit_confirms:init(),
+ ?assertEqual(0, rabbit_confirms:size(U0)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U0)),
+ ?assertEqual(true, rabbit_confirms:is_empty(U0)),
+
+ U1 = rabbit_confirms:insert(1, [QName], XName, U0),
+
+ {ok, {1, XName}, U2} = rabbit_confirms:reject(1, U1),
+ {error, not_found} = rabbit_confirms:reject(1, U2),
+ ?assertEqual(0, rabbit_confirms:size(U2)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U2)),
+
+ U3 = rabbit_confirms:insert(2, [QName, QName2], XName, U1),
+
+ {ok, {1, XName}, U4} = rabbit_confirms:reject(1, U3),
+ {error, not_found} = rabbit_confirms:reject(1, U4),
+ ?assertEqual(1, rabbit_confirms:size(U4)),
+ ?assertEqual(2, rabbit_confirms:smallest(U4)),
+
+ {ok, {2, XName}, U5} = rabbit_confirms:reject(2, U3),
+ {error, not_found} = rabbit_confirms:reject(2, U5),
+ ?assertEqual(1, rabbit_confirms:size(U5)),
+ ?assertEqual(1, rabbit_confirms:smallest(U5)),
+
+ ok.
+
+remove_queue(_Config) ->
+ XName = rabbit_misc:r(<<"/">>, exchange, <<"X">>),
+ QName = rabbit_misc:r(<<"/">>, queue, <<"Q">>),
+ QName2 = rabbit_misc:r(<<"/">>, queue, <<"Q2">>),
+ U0 = rabbit_confirms:init(),
+
+ U1 = rabbit_confirms:insert(1, [QName, QName2], XName, U0),
+ U2 = rabbit_confirms:insert(2, [QName2], XName, U1),
+ {[{2, XName}], U3} = rabbit_confirms:remove_queue(QName2, U2),
+ ?assertEqual(1, rabbit_confirms:size(U3)),
+ ?assertEqual(1, rabbit_confirms:smallest(U3)),
+ {[{1, XName}], U4} = rabbit_confirms:remove_queue(QName, U3),
+ ?assertEqual(0, rabbit_confirms:size(U4)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U4)),
+
+ U5 = rabbit_confirms:insert(1, [QName], XName, U0),
+ U6 = rabbit_confirms:insert(2, [QName], XName, U5),
+ {[{1, XName}, {2, XName}], _U} = rabbit_confirms:remove_queue(QName, U6),
+
+ ok.
+
+
+%% Utility
diff --git a/deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl b/deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl
new file mode 100644
index 0000000000..cae5502a0a
--- /dev/null
+++ b/deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl
@@ -0,0 +1,392 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_core_metrics_gc_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests},
+ {group, cluster_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [],
+ [ queue_metrics,
+ connection_metrics,
+ channel_metrics,
+ node_metrics,
+ gen_server2_metrics,
+ consumer_metrics
+ ]
+ },
+ {cluster_tests, [], [cluster_queue_metrics]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ AppEnv = {rabbit, [{core_metrics_gc_interval, 6000000},
+ {collect_statistics_interval, 100},
+ {collect_statistics, fine}]},
+ rabbit_ct_helpers:merge_app_env(Config, AppEnv).
+
+init_per_group(cluster_tests, Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Conf = [{rmq_nodename_suffix, cluster_tests}, {rmq_nodes_count, 2}],
+ Config1 = rabbit_ct_helpers:set_config(Config, Conf),
+ rabbit_ct_helpers:run_setup_steps(Config1, setup_steps());
+init_per_group(non_parallel_tests, Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Conf = [{rmq_nodename_suffix, non_parallel_tests}],
+ Config1 = rabbit_ct_helpers:set_config(Config, Conf),
+ rabbit_ct_helpers:run_setup_steps(Config1, setup_steps()).
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(
+ Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase),
+ rabbit_ct_helpers:run_teardown_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps()).
+
+setup_steps() ->
+ [ fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps().
+
+%% -------------------------------------------------------------------
+%% Single-node Testcases.
+%% -------------------------------------------------------------------
+
+queue_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_metrics">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_metrics">>},
+ #amqp_msg{payload = <<"hello">>}),
+ timer:sleep(150),
+
+ Q = q(<<"myqueue">>),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, queue_stats,
+ [Q, infos]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, queue_stats,
+ [Q, 1, 1, 1, 1]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_metrics, Q]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_coarse_metrics, Q]),
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_coarse_metrics]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_metrics, Q]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_coarse_metrics, Q]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_metrics">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+connection_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_metrics">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_metrics">>},
+ #amqp_msg{payload = <<"hello">>}),
+ timer:sleep(200),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ connection_created, [DeadPid, infos]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ connection_stats, [DeadPid, infos]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ connection_stats, [DeadPid, 1, 1, 1]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_created, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_metrics, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_coarse_metrics, DeadPid]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_created, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_metrics, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_coarse_metrics, DeadPid]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [connection_created]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [connection_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [connection_coarse_metrics]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_metrics">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+channel_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_metrics">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_metrics">>},
+ #amqp_msg{payload = <<"hello">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"won't route $¢% anywhere">>},
+ #amqp_msg{payload = <<"hello">>}),
+ {#'basic.get_ok'{}, _} = amqp_channel:call(Ch, #'basic.get'{queue = <<"queue_metrics">>,
+ no_ack=true}),
+ timer:sleep(150),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+
+ Q = q(<<"myqueue">>),
+ X = x(<<"myexchange">>),
+
+
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_created, [DeadPid, infos]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_stats, [DeadPid, infos]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_stats, [reductions, DeadPid, 1]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_stats, [exchange_stats, publish,
+ {DeadPid, X}, 1]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_stats, [queue_stats, get,
+ {DeadPid, Q}, 1]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_stats, [queue_exchange_stats, publish,
+ {DeadPid, {Q, X}}, 1]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_created, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_metrics, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_process_metrics, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_exchange_metrics, {DeadPid, X}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_queue_metrics, {DeadPid, Q}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_queue_exchange_metrics, {DeadPid, {Q, X}}]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_created]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_process_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_exchange_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_queue_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_queue_exchange_metrics]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_created, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_metrics, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_process_metrics, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_exchange_metrics, {DeadPid, X}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_queue_metrics, {DeadPid, Q}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_queue_exchange_metrics, {DeadPid, {Q, X}}]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_metrics">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+node_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, node_node_stats,
+ [{node(), 'deer@localhost'}, infos]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_node_metrics, {node(), 'deer@localhost'}]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_node_metrics, {node(), 'deer@localhost'}]),
+
+ ok.
+
+gen_server2_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, gen_server2_stats,
+ [DeadPid, 1]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [gen_server2_metrics, DeadPid]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [gen_server2_metrics]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [gen_server2_metrics, DeadPid]),
+
+ ok.
+
+consumer_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_metrics">>}),
+ amqp_channel:call(Ch, #'basic.consume'{queue = <<"queue_metrics">>}),
+ timer:sleep(200),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+
+ QName = q(<<"queue_metrics">>),
+ CTag = <<"tag">>,
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ consumer_created, [DeadPid, CTag, true, true,
+ QName, 1, false, waiting, []]),
+ Id = {QName, DeadPid, CTag},
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup, [consumer_created, Id]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [consumer_created]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup, [consumer_created, Id]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_metrics">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
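+%% Spawn a process that exits immediately, yielding a pid that refers to a
+%% dead process.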
+dead_pid() ->
+ spawn(fun() -> ok end).
+
+q(Name) ->
+ #resource{ virtual_host = <<"/">>,
+ kind = queue,
+ name = Name }.
+
+x(Name) ->
+ #resource{ virtual_host = <<"/">>,
+ kind = exchange,
+ name = Name }.
+
+%% -------------------------------------------------------------------
+%% Cluster Testcases.
+%% -------------------------------------------------------------------
+
+cluster_queue_metrics(Config) ->
+ VHost = <<"/">>,
+ QueueName = <<"cluster_queue_metrics">>,
+ PolicyName = <<"ha-policy-1">>,
+ PolicyPattern = <<".*">>,
+ PolicyAppliesTo = <<"queues">>,
+
+ Node0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node0),
+
+ Node0Name = rabbit_data_coercion:to_binary(Node0),
+ Definition0 = [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [Node0Name]}],
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
+ PolicyName, PolicyPattern,
+ PolicyAppliesTo, Definition0),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = <<"hello">>}),
+
+    % Update policy to point to the other node
+ Node1Name = rabbit_data_coercion:to_binary(Node1),
+ Definition1 = [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [Node1Name]}],
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
+ PolicyName, PolicyPattern,
+ PolicyAppliesTo, Definition1),
+
+ % Synchronize
+ Name = rabbit_misc:r(VHost, queue, QueueName),
+ [Q] = rabbit_ct_broker_helpers:rpc(Config, Node0, ets, lookup, [rabbit_queue, Name]),
+ QPid = amqqueue:get_pid(Q),
+ ok = rabbit_ct_broker_helpers:rpc(Config, Node0, rabbit_amqqueue, sync_mirrors, [QPid]),
+
+ % Check ETS table for data
+ wait_for(fun () ->
+ [] =:= rabbit_ct_broker_helpers:rpc(
+ Config, Node0, ets, tab2list,
+ [queue_coarse_metrics])
+ end, 60),
+
+ wait_for(fun () ->
+ Ret = rabbit_ct_broker_helpers:rpc(
+ Config, Node1, ets, tab2list,
+ [queue_coarse_metrics]),
+ case Ret of
+ [{Name, 1, 0, 1, _}] -> true;
+ _ -> false
+ end
+ end, 60),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue=QueueName}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ Config.
+
+wait_for(_Fun, 0) -> false;
+wait_for(Fun, Seconds) ->
+ case Fun() of
+ true -> ok;
+ false ->
+ timer:sleep(1000),
+ wait_for(Fun, Seconds - 1)
+ end.
diff --git a/deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl b/deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl
new file mode 100644
index 0000000000..92c01d2b0e
--- /dev/null
+++ b/deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Dummy module to test rabbit_direct:extract_extra_auth_props
+
+-module(rabbit_dummy_protocol_connection_info).
+
+%% API
+-export([additional_authn_params/4]).
+
+additional_authn_params(_Creds, _VHost, Pid, _Infos) ->
+ case Pid of
+ -1 -> throw(error);
+ _ -> [{client_id, <<"DummyClientId">>}]
+ end.
diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl
new file mode 100644
index 0000000000..7b90d91bfa
--- /dev/null
+++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl
@@ -0,0 +1,1667 @@
+-module(rabbit_fifo_SUITE).
+
+%% rabbit_fifo unit tests suite
+
+-compile(export_all).
+
+-compile({no_auto_import, [apply/3]}).
+-export([
+ ]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("src/rabbit_fifo.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+%% replicate eunit-like test resolution: every function ending in _test is
+%% picked up as a test case
+all_tests() ->
+ [F || {F, _} <- ?MODULE:module_info(functions),
+ re:run(atom_to_list(F), "_test$") /= nomatch].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
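+%% Assertion helpers: ?ASSERT_EFF succeeds if at least one effect in the
+%% list matches the pattern (and optional guard); ?ASSERT_NO_EFF and
+%% ?assertNoEffect succeed only if no effect matches.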
+-define(ASSERT_EFF(EfxPat, Effects),
+ ?ASSERT_EFF(EfxPat, true, Effects)).
+
+-define(ASSERT_EFF(EfxPat, Guard, Effects),
+ ?assert(lists:any(fun (EfxPat) when Guard -> true;
+ (_) -> false
+ end, Effects))).
+
+-define(ASSERT_NO_EFF(EfxPat, Effects),
+ ?assert(not lists:any(fun (EfxPat) -> true;
+ (_) -> false
+ end, Effects))).
+
+-define(ASSERT_NO_EFF(EfxPat, Guard, Effects),
+ ?assert(not lists:any(fun (EfxPat) when Guard -> true;
+ (_) -> false
+ end, Effects))).
+
+-define(assertNoEffect(EfxPat, Effects),
+ ?assert(not lists:any(fun (EfxPat) -> true;
+ (_) -> false
+ end, Effects))).
+
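+%% Initialise a rabbit_fifo machine state for a test queue in the default
+%% vhost; a release cursor interval of 0 emits release cursor effects as
+%% early as possible.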
+test_init(Name) ->
+ init(#{name => Name,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(Name, utf8)),
+ release_cursor_interval => 0}).
+
+enq_enq_checkout_test(_) ->
+ Cid = {<<"enq_enq_checkout_test">>, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {_State3, _, Effects} =
+ apply(meta(3),
+ rabbit_fifo:make_checkout(Cid, {once, 2, simple_prefetch}, #{}),
+ State2),
+ ?ASSERT_EFF({monitor, _, _}, Effects),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, _}, _}, Effects),
+ ok.
+
+credit_enq_enq_checkout_settled_credit_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {State3, _, Effects} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {auto, 1, credited}, #{}), State2),
+ ?ASSERT_EFF({monitor, _, _}, Effects),
+ Deliveries = lists:filter(fun ({send_msg, _, {delivery, _, _}, _}) -> true;
+ (_) -> false
+ end, Effects),
+ ?assertEqual(1, length(Deliveries)),
+    %% settle the delivery; this should _not_ result in further messages
+    %% being delivered
+ {State4, SettledEffects} = settle(Cid, 4, 1, State3),
+ ?assertEqual(false, lists:any(fun ({send_msg, _, {delivery, _, _}, _}) ->
+ true;
+ (_) -> false
+ end, SettledEffects)),
+    %% granting credit (3) should deliver the second msg if the receiver's
+    %% delivery count is (1)
+ {State5, CreditEffects} = credit(Cid, 5, 1, 1, false, State4),
+ % ?debugFmt("CreditEffects ~p ~n~p", [CreditEffects, State4]),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, _}, _}, CreditEffects),
+ {_State6, FinalEffects} = enq(6, 3, third, State5),
+ ?assertEqual(false, lists:any(fun ({send_msg, _, {delivery, _, _}, _}) ->
+ true;
+ (_) -> false
+ end, FinalEffects)),
+ ok.
+
+credit_with_drained_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = test_init(test),
+ %% checkout with a single credit
+ {State1, _, _} =
+ apply(meta(1), rabbit_fifo:make_checkout(Cid, {auto, 1, credited},#{}),
+ State0),
+ ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 1,
+ delivery_count = 0}}},
+ State1),
+ {State, Result, _} =
+ apply(meta(3), rabbit_fifo:make_credit(Cid, 0, 5, true), State1),
+ ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0,
+ delivery_count = 5}}},
+ State),
+ ?assertEqual({multi, [{send_credit_reply, 0},
+ {send_drained, {?FUNCTION_NAME, 5}}]},
+ Result),
+ ok.
+
+credit_and_drain_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ %% checkout without any initial credit (like AMQP 1.0 would)
+ {State3, _, CheckEffs} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {auto, 0, credited}, #{}),
+ State2),
+
+ ?ASSERT_NO_EFF({send_msg, _, {delivery, _, _}}, CheckEffs),
+ {State4, {multi, [{send_credit_reply, 0},
+ {send_drained, {?FUNCTION_NAME, 2}}]},
+ Effects} = apply(meta(4), rabbit_fifo:make_credit(Cid, 4, 0, true), State3),
+ ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0,
+ delivery_count = 4}}},
+ State4),
+
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}},
+ {_, {_, second}}]}, _}, Effects),
+ {_State5, EnqEffs} = enq(5, 2, third, State4),
+ ?ASSERT_NO_EFF({send_msg, _, {delivery, _, _}}, EnqEffs),
+ ok.
+
+
+
+enq_enq_deq_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ % get returns a reply value
+ NumReady = 1,
+ {_State3, {dequeue, {0, {_, first}}, NumReady}, [{monitor, _, _}]} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State2),
+ ok.
+
+enq_enq_deq_deq_settle_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ % get returns a reply value
+ {State3, {dequeue, {0, {_, first}}, 1}, [{monitor, _, _}]} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State2),
+ {_State4, {dequeue, empty}} =
+ apply(meta(4), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State3),
+ ok.
+
+enq_enq_checkout_get_settled_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ % get returns a reply value
+ {_State2, {dequeue, {0, {_, first}}, _}, _Effs} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}),
+ State1),
+ ok.
+
+checkout_get_empty_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State = test_init(test),
+ {_State2, {dequeue, empty}} =
+ apply(meta(1), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State),
+ ok.
+
+untracked_enq_deq_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = test_init(test),
+ {State1, _, _} = apply(meta(1),
+ rabbit_fifo:make_enqueue(undefined, undefined, first),
+ State0),
+ {_State2, {dequeue, {0, {_, first}}, _}, _} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State1),
+ ok.
+
+release_cursor_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {State3, _} = check(Cid, 3, 10, State2),
+ % no release cursor effect at this point
+ {State4, _} = settle(Cid, 4, 1, State3),
+ {_Final, Effects1} = settle(Cid, 5, 0, State4),
+ % empty queue forwards release cursor all the way
+ ?ASSERT_EFF({release_cursor, 5, _}, Effects1),
+ ok.
+
+checkout_enq_settle_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, [{monitor, _, _} | _]} = check(Cid, 1, test_init(test)),
+ {State2, Effects0} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, ?FUNCTION_NAME,
+ [{0, {_, first}}]}, _},
+ Effects0),
+ {State3, [_Inactive]} = enq(3, 2, second, State2),
+ {_, _Effects} = settle(Cid, 4, 0, State3),
+ % the release cursor is the smallest raft index that does not
+ % contribute to the state of the application
+ % ?ASSERT_EFF({release_cursor, 2, _}, Effects),
+ ok.
+
+out_of_order_enqueue_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, [{monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)),
+ {State2, Effects2} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2),
+ % assert monitor was set up
+ ?ASSERT_EFF({monitor, _, _}, Effects2),
+ % enqueue seq num 3 and 4 before 2
+ {State3, Effects3} = enq(3, 3, third, State2),
+ ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects3),
+ {State4, Effects4} = enq(4, 4, fourth, State3),
+    % assert no further deliveries were made
+ ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects4),
+ {_State5, Effects5} = enq(5, 2, second, State4),
+ % assert two deliveries were now made
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, second}},
+ {_, {_, third}},
+ {_, {_, fourth}}]}, _},
+ Effects5),
+ ok.
+
+out_of_order_first_enqueue_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = check_n(Cid, 5, 5, test_init(test)),
+ {_State2, Effects2} = enq(2, 10, first, State1),
+ ?ASSERT_EFF({monitor, process, _}, Effects2),
+ ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _},
+ Effects2),
+ ok.
+
+duplicate_enqueue_test(_) ->
+ Cid = {<<"duplicate_enqueue_test">>, self()},
+ {State1, [{monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)),
+ {State2, Effects2} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2),
+ {_State3, Effects3} = enq(3, 1, first, State2),
+ ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects3),
+ ok.
+
+return_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Cid2 = {<<"cid2">>, self()},
+ {State0, _} = enq(1, 1, msg, test_init(test)),
+ {State1, _} = check_auto(Cid, 2, State0),
+ {State2, _} = check_auto(Cid2, 3, State1),
+ {State3, _, _} = apply(meta(4), rabbit_fifo:make_return(Cid, [0]), State2),
+ ?assertMatch(#{Cid := #consumer{checked_out = C}} when map_size(C) == 0,
+ State3#rabbit_fifo.consumers),
+ ?assertMatch(#{Cid2 := #consumer{checked_out = C2}} when map_size(C2) == 1,
+ State3#rabbit_fifo.consumers),
+ ok.
+
+return_dequeue_delivery_limit_test(_) ->
+ Init = init(#{name => test,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(test, utf8)),
+ release_cursor_interval => 0,
+ delivery_limit => 1}),
+ {State0, _} = enq(1, 1, msg, Init),
+
+ Cid = {<<"cid">>, self()},
+ Cid2 = {<<"cid2">>, self()},
+
+ {State1, {MsgId1, _}} = deq(2, Cid, unsettled, State0),
+ {State2, _, _} = apply(meta(4), rabbit_fifo:make_return(Cid, [MsgId1]),
+ State1),
+
+ {State3, {MsgId2, _}} = deq(2, Cid2, unsettled, State2),
+ {State4, _, _} = apply(meta(4), rabbit_fifo:make_return(Cid2, [MsgId2]),
+ State3),
+ ?assertMatch(#{num_messages := 0}, rabbit_fifo:overview(State4)),
+ ok.
+
+return_non_existent_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State0, [_, _Inactive]} = enq(1, 1, second, test_init(test)),
+ % return non-existent
+ {_State2, _} = apply(meta(3), rabbit_fifo:make_return(Cid, [99]), State0),
+ ok.
+
+return_checked_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State0, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active} | _ ]} = check_auto(Cid, 2, State0),
+ % returning immediately checks out the same message again
+ {_, ok, [{send_msg, _, {delivery, _, [{_, _}]}, _},
+ {aux, active}]} =
+ apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1),
+ ok.
+
+return_checked_out_limit_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Init = init(#{name => test,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(test, utf8)),
+ release_cursor_interval => 0,
+ delivery_limit => 1}),
+ {State0, [_, _]} = enq(1, 1, first, Init),
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active} | _ ]} = check_auto(Cid, 2, State0),
+ % returning immediately checks out the same message again
+ {State2, ok, [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _},
+ {aux, active}]} =
+ apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1),
+ {#rabbit_fifo{ra_indexes = RaIdxs}, ok, [_ReleaseEff]} =
+ apply(meta(4), rabbit_fifo:make_return(Cid, [MsgId2]), State2),
+ ?assertEqual(0, rabbit_fifo_index:size(RaIdxs)),
+ ok.
+
+return_auto_checked_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State00, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State0, [_]} = enq(2, 2, second, State00),
+    % the consumer is first active, then inactive, as it takes one message
+    % but cannot take any more
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active},
+ {aux, inactive}
+ ]} = check_auto(Cid, 2, State0),
+ % return should include another delivery
+ {_State2, _, Effects} = apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{_, {#{delivery_count := 1}, first}}]}, _},
+ Effects),
+ ok.
+
+cancelled_checkout_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State00, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State0, [_]} = enq(2, 2, second, State00),
+ {State1, _} = check_auto(Cid, 2, State0),
+ % cancelled checkout should not return pending messages to queue
+ {State2, _, _} = apply(meta(3), rabbit_fifo:make_checkout(Cid, cancel, #{}), State1),
+ ?assertEqual(1, lqueue:len(State2#rabbit_fifo.messages)),
+ ?assertEqual(0, lqueue:len(State2#rabbit_fifo.returns)),
+
+ {State3, {dequeue, empty}} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State2),
+ %% settle
+ {State4, ok, _} =
+ apply(meta(4), rabbit_fifo:make_settle(Cid, [0]), State3),
+
+ {_State, {dequeue, {_, {_, second}}, _}, _} =
+ apply(meta(5), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State4),
+ ok.
+
+down_with_noproc_consumer_returns_unsettled_test(_) ->
+ Cid = {<<"down_consumer_returns_unsettled_test">>, self()},
+ {State0, [_, _]} = enq(1, 1, second, test_init(test)),
+ {State1, [{monitor, process, Pid} | _]} = check(Cid, 2, State0),
+ {State2, _, _} = apply(meta(3), {down, Pid, noproc}, State1),
+ {_State, Effects} = check(Cid, 4, State2),
+ ?ASSERT_EFF({monitor, process, _}, Effects),
+ ok.
+
+down_with_noconnection_marks_suspect_and_node_is_monitored_test(_) ->
+ Pid = spawn(fun() -> ok end),
+ Cid = {<<"down_with_noconnect">>, Pid},
+ Self = self(),
+ Node = node(Pid),
+ {State0, Effects0} = enq(1, 1, second, test_init(test)),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects0),
+ {State1, Effects1} = check_auto(Cid, 2, State0),
+ #consumer{credit = 0} = maps:get(Cid, State1#rabbit_fifo.consumers),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects1),
+ % monitor both enqueuer and consumer
+ % because we received a noconnection we now need to monitor the node
+ {State2a, _, _} = apply(meta(3), {down, Pid, noconnection}, State1),
+ #consumer{credit = 1,
+ checked_out = Ch,
+ status = suspected_down} = maps:get(Cid, State2a#rabbit_fifo.consumers),
+ ?assertEqual(#{}, Ch),
+ %% validate consumer has credit
+ {State2, _, Effects2} = apply(meta(3), {down, Self, noconnection}, State2a),
+ ?ASSERT_EFF({monitor, node, _}, Effects2),
+ ?assertNoEffect({demonitor, process, _}, Effects2),
+ % when the node comes up we need to retry the process monitors for the
+ % disconnected processes
+ {State3, _, Effects3} = apply(meta(3), {nodeup, Node}, State2),
+ #consumer{status = up} = maps:get(Cid, State3#rabbit_fifo.consumers),
+ % try to re-monitor the suspect processes
+ ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects3),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects3),
+ ok.
+
+down_with_noconnection_returns_unack_test(_) ->
+ Pid = spawn(fun() -> ok end),
+ Cid = {<<"down_with_noconnect">>, Pid},
+ {State0, _} = enq(1, 1, second, test_init(test)),
+ ?assertEqual(1, lqueue:len(State0#rabbit_fifo.messages)),
+ ?assertEqual(0, lqueue:len(State0#rabbit_fifo.returns)),
+ {State1, {_, _}} = deq(2, Cid, unsettled, State0),
+ ?assertEqual(0, lqueue:len(State1#rabbit_fifo.messages)),
+ ?assertEqual(0, lqueue:len(State1#rabbit_fifo.returns)),
+ {State2a, _, _} = apply(meta(3), {down, Pid, noconnection}, State1),
+ ?assertEqual(0, lqueue:len(State2a#rabbit_fifo.messages)),
+ ?assertEqual(1, lqueue:len(State2a#rabbit_fifo.returns)),
+ ?assertMatch(#consumer{checked_out = Ch,
+ status = suspected_down}
+ when map_size(Ch) == 0,
+ maps:get(Cid, State2a#rabbit_fifo.consumers)),
+ ok.
+
+down_with_noproc_enqueuer_is_cleaned_up_test(_) ->
+ State00 = test_init(test),
+ Pid = spawn(fun() -> ok end),
+ {State0, _, Effects0} = apply(meta(1), rabbit_fifo:make_enqueue(Pid, 1, first), State00),
+ ?ASSERT_EFF({monitor, process, _}, Effects0),
+ {State1, _, _} = apply(meta(3), {down, Pid, noproc}, State0),
+ % ensure there are no enqueuers
+ ?assert(0 =:= maps:size(State1#rabbit_fifo.enqueuers)),
+ ok.
+
+discarded_message_without_dead_letter_handler_is_removed_test(_) ->
+ Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()},
+ {State0, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State1, Effects1} = check_n(Cid, 2, 10, State0),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects1),
+ {_State2, _, Effects2} = apply(meta(1),
+ rabbit_fifo:make_discard(Cid, [0]), State1),
+ ?assertNoEffect({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects2),
+ ok.
+
+discarded_message_with_dead_letter_handler_emits_log_effect_test(_) ->
+ Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()},
+ State00 = init(#{name => test,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ dead_letter_handler =>
+ {somemod, somefun, [somearg]}}),
+ {State0, [_, _]} = enq(1, 1, first, State00),
+ {State1, Effects1} = check_n(Cid, 2, 10, State0),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects1),
+ {_State2, _, Effects2} = apply(meta(1), rabbit_fifo:make_discard(Cid, [0]), State1),
+ % assert mod call effect with appended reason and message
+ ?ASSERT_EFF({log, _RaftIdxs, _}, Effects2),
+ ok.
+
+tick_test(_) ->
+ Cid = {<<"c">>, self()},
+ Cid2 = {<<"c2">>, self()},
+ {S0, _} = enq(1, 1, <<"fst">>, test_init(?FUNCTION_NAME)),
+ {S1, _} = enq(2, 2, <<"snd">>, S0),
+ {S2, {MsgId, _}} = deq(3, Cid, unsettled, S1),
+ {S3, {_, _}} = deq(4, Cid2, unsettled, S2),
+ {S4, _, _} = apply(meta(5), rabbit_fifo:make_return(Cid, [MsgId]), S3),
+
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{},
+ {?FUNCTION_NAME, 1, 1, 2, 1, 3, 3},
+ [_Node]
+ ]}] = rabbit_fifo:tick(1, S4),
+ ok.
+
+
+delivery_query_returns_deliveries_test(_) ->
+ Tag = atom_to_binary(?FUNCTION_NAME, utf8),
+ Cid = {Tag, self()},
+ Commands = [
+ rabbit_fifo:make_checkout(Cid, {auto, 5, simple_prefetch}, #{}),
+ rabbit_fifo:make_enqueue(self(), 1, one),
+ rabbit_fifo:make_enqueue(self(), 2, two),
+ rabbit_fifo:make_enqueue(self(), 3, tre),
+ rabbit_fifo:make_enqueue(self(), 4, for)
+ ],
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ {State, _Effects} = run_log(test_init(help), Entries),
+    % requesting msg id 0 returns a single delivery; ids 1 to 3 return three
+ [{0, {_, one}}] = rabbit_fifo:get_checked_out(Cid, 0, 0, State),
+ [_, _, _] = rabbit_fifo:get_checked_out(Cid, 1, 3, State),
+ ok.
+
+pending_enqueue_is_enqueued_on_down_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Pid = self(),
+ {State0, _} = enq(1, 2, first, test_init(test)),
+ {State1, _, _} = apply(meta(2), {down, Pid, noproc}, State0),
+ {_State2, {dequeue, {0, {_, first}}, 0}, _} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State1),
+ ok.
+
+duplicate_delivery_test(_) ->
+ {State0, _} = enq(1, 1, first, test_init(test)),
+ {#rabbit_fifo{ra_indexes = RaIdxs,
+ messages = Messages}, _} = enq(2, 1, first, State0),
+ ?assertEqual(1, rabbit_fifo_index:size(RaIdxs)),
+ ?assertEqual(1, lqueue:len(Messages)),
+ ok.
+
+state_enter_file_handle_leader_reservation_test(_) ->
+ S0 = init(#{name => the_name,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ become_leader_handler => {m, f, [a]}}),
+
+ Resource = {resource, <<"/">>, queue, <<"test">>},
+ Effects = rabbit_fifo:state_enter(leader, S0),
+ ?assertEqual([
+ {mod_call, m, f, [a, the_name]},
+ {mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}
+ ], Effects),
+ ok.
+
+state_enter_file_handle_other_reservation_test(_) ->
+ S0 = init(#{name => the_name,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>)}),
+ Effects = rabbit_fifo:state_enter(other, S0),
+ ?assertEqual([
+ {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}
+ ],
+ Effects),
+ ok.
+
+state_enter_monitors_and_notifications_test(_) ->
+ Oth = spawn(fun () -> ok end),
+ {State0, _} = enq(1, 1, first, test_init(test)),
+ Cid = {<<"adf">>, self()},
+ OthCid = {<<"oth">>, Oth},
+ {State1, _} = check(Cid, 2, State0),
+ {State, _} = check(OthCid, 3, State1),
+ Self = self(),
+ Effects = rabbit_fifo:state_enter(leader, State),
+
+ %% monitor all enqueuers and consumers
+ [{monitor, process, Self},
+ {monitor, process, Oth}] =
+ lists:filter(fun ({monitor, process, _}) -> true;
+ (_) -> false
+ end, Effects),
+ [{send_msg, Self, leader_change, ra_event},
+ {send_msg, Oth, leader_change, ra_event}] =
+ lists:filter(fun ({send_msg, _, leader_change, ra_event}) -> true;
+ (_) -> false
+ end, Effects),
+ ?ASSERT_EFF({monitor, process, _}, Effects),
+ ok.
+
+purge_test(_) ->
+ Cid = {<<"purge_test">>, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, {purge, 1}, _} = apply(meta(2), rabbit_fifo:make_purge(), State1),
+ {State3, _} = enq(3, 2, second, State2),
+ % get returns a reply value
+ {_State4, {dequeue, {0, {_, second}}, _}, [{monitor, _, _}]} =
+ apply(meta(4), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State3),
+ ok.
+
+purge_with_checkout_test(_) ->
+ Cid = {<<"purge_test">>, self()},
+ {State0, _} = check_auto(Cid, 1, test_init(?FUNCTION_NAME)),
+ {State1, _} = enq(2, 1, <<"first">>, State0),
+ {State2, _} = enq(3, 2, <<"second">>, State1),
+ %% assert message bytes are non zero
+ ?assert(State2#rabbit_fifo.msg_bytes_checkout > 0),
+ ?assert(State2#rabbit_fifo.msg_bytes_enqueue > 0),
+ {State3, {purge, 1}, _} = apply(meta(2), rabbit_fifo:make_purge(), State2),
+ ?assert(State3#rabbit_fifo.msg_bytes_checkout > 0),
+ ?assertEqual(0, State3#rabbit_fifo.msg_bytes_enqueue),
+ ?assertEqual(1, rabbit_fifo_index:size(State3#rabbit_fifo.ra_indexes)),
+ #consumer{checked_out = Checked} = maps:get(Cid, State3#rabbit_fifo.consumers),
+ ?assertEqual(1, maps:size(Checked)),
+ ok.
+
+down_noproc_returns_checked_out_in_order_test(_) ->
+ S0 = test_init(?FUNCTION_NAME),
+ %% enqueue 100
+ S1 = lists:foldl(fun (Num, FS0) ->
+ {FS, _} = enq(Num, Num, Num, FS0),
+ FS
+ end, S0, lists:seq(1, 100)),
+ ?assertEqual(100, lqueue:len(S1#rabbit_fifo.messages)),
+ Cid = {<<"cid">>, self()},
+ {S2, _} = check(Cid, 101, 1000, S1),
+ #consumer{checked_out = Checked} = maps:get(Cid, S2#rabbit_fifo.consumers),
+ ?assertEqual(100, maps:size(Checked)),
+ %% simulate down
+ {S, _, _} = apply(meta(102), {down, self(), noproc}, S2),
+ Returns = lqueue:to_list(S#rabbit_fifo.returns),
+ ?assertEqual(100, length(Returns)),
+ ?assertEqual(0, maps:size(S#rabbit_fifo.consumers)),
+ %% validate returns are in order
+ ?assertEqual(lists:sort(Returns), Returns),
+ ok.
+
+down_noconnection_returns_checked_out_test(_) ->
+ S0 = test_init(?FUNCTION_NAME),
+ NumMsgs = 20,
+ S1 = lists:foldl(fun (Num, FS0) ->
+ {FS, _} = enq(Num, Num, Num, FS0),
+ FS
+ end, S0, lists:seq(1, NumMsgs)),
+ ?assertEqual(NumMsgs, lqueue:len(S1#rabbit_fifo.messages)),
+ Cid = {<<"cid">>, self()},
+ {S2, _} = check(Cid, 101, 1000, S1),
+ #consumer{checked_out = Checked} = maps:get(Cid, S2#rabbit_fifo.consumers),
+ ?assertEqual(NumMsgs, maps:size(Checked)),
+ %% simulate down
+ {S, _, _} = apply(meta(102), {down, self(), noconnection}, S2),
+ Returns = lqueue:to_list(S#rabbit_fifo.returns),
+ ?assertEqual(NumMsgs, length(Returns)),
+ ?assertMatch(#consumer{checked_out = Ch}
+ when map_size(Ch) == 0,
+ maps:get(Cid, S#rabbit_fifo.consumers)),
+ %% validate returns are in order
+ ?assertEqual(lists:sort(Returns), Returns),
+ ok.
+
+single_active_consumer_basic_get_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ ?assertEqual(single_active, State0#rabbit_fifo.cfg#cfg.consumer_strategy),
+ ?assertEqual(0, map_size(State0#rabbit_fifo.consumers)),
+ {State1, _} = enq(1, 1, first, State0),
+ {_State, {error, {unsupported, single_active_consumer}}} =
+ apply(meta(2), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State1),
+ ok.
+
+single_active_consumer_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ ?assertEqual(single_active, State0#rabbit_fifo.cfg#cfg.consumer_strategy),
+ ?assertEqual(0, map_size(State0#rabbit_fifo.consumers)),
+
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ meta(1),
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch},
+ #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ C3 = {<<"ctag3">>, self()},
+ C4 = {<<"ctag4">>, self()},
+
+ % the first registered consumer is the active one, the others are waiting
+ ?assertEqual(1, map_size(State1#rabbit_fifo.consumers)),
+ ?assertMatch(#{C1 := _}, State1#rabbit_fifo.consumers),
+ ?assertEqual(3, length(State1#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C2, 1, State1#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C3, 1, State1#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1, State1#rabbit_fifo.waiting_consumers)),
+
+ % cancelling a waiting consumer
+ {State2, _, Effects1} = apply(meta(2),
+ make_checkout(C3, cancel, #{}),
+ State1),
+ % the active consumer should still be in place
+ ?assertEqual(1, map_size(State2#rabbit_fifo.consumers)),
+ ?assertMatch(#{C1 := _}, State2#rabbit_fifo.consumers),
+ % the cancelled consumer has been removed from waiting consumers
+ ?assertEqual(2, length(State2#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C2, 1, State2#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1, State2#rabbit_fifo.waiting_consumers)),
+ % there are some effects to unregister the consumer
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C3, Effects1),
+
+ % cancelling the active consumer
+ {State3, _, Effects2} = apply(meta(3),
+ make_checkout(C1, cancel, #{}),
+ State2),
+ % the second registered consumer is now the active one
+ ?assertEqual(1, map_size(State3#rabbit_fifo.consumers)),
+ ?assertMatch(#{C2 := _}, State3#rabbit_fifo.consumers),
+ % the new active consumer is no longer in the waiting list
+ ?assertEqual(1, length(State3#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1,
+ State3#rabbit_fifo.waiting_consumers)),
+ %% should have a cancel consumer handler mod_call effect and
+ %% an active new consumer effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C1, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects2),
+
+ % cancelling the active consumer
+ {State4, _, Effects3} = apply(meta(4),
+ make_checkout(C2, cancel, #{}),
+ State3),
+ % the last waiting consumer became the active one
+ ?assertEqual(1, map_size(State4#rabbit_fifo.consumers)),
+ ?assertMatch(#{C4 := _}, State4#rabbit_fifo.consumers),
+ % the waiting consumer list is now empty
+ ?assertEqual(0, length(State4#rabbit_fifo.waiting_consumers)),
+ % there are some effects to unregister the consumer and
+ % to update the new active one (metrics)
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C2, Effects3),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects3),
+
+ % cancelling the last consumer
+ {State5, _, Effects4} = apply(meta(5),
+ make_checkout(C4, cancel, #{}),
+ State4),
+ % no active consumer anymore
+ ?assertEqual(0, map_size(State5#rabbit_fifo.consumers)),
+ % still nothing in the waiting list
+ ?assertEqual(0, length(State5#rabbit_fifo.waiting_consumers)),
+ % there is an effect to unregister the consumer + queue inactive effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, _}, Effects4),
+
+ ok.
+
+single_active_consumer_cancel_consumer_when_channel_is_down_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ [C1, C2, C3, C4] = Consumers =
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}],
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, ChannelId}, {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0, Consumers),
+
+ % the channel of the active consumer goes down
+ {State2, _, Effects} = apply(meta(2), {down, Pid1, noproc}, State1),
+ % fell back to another consumer
+ ?assertEqual(1, map_size(State2#rabbit_fifo.consumers)),
+ % there are still waiting consumers
+ ?assertEqual(2, length(State2#rabbit_fifo.waiting_consumers)),
+ % effects to unregister the consumer and
+ % to update the new active one (metrics) are there
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C1, Effects),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects),
+
+ % the channel of the active consumer and a waiting consumer go down
+ {State3, _, Effects2} = apply(meta(3), {down, Pid2, noproc}, State2),
+ % fell back to another consumer
+ ?assertEqual(1, map_size(State3#rabbit_fifo.consumers)),
+ % no more waiting consumer
+ ?assertEqual(0, length(State3#rabbit_fifo.waiting_consumers)),
+ % effects to cancel both consumers of this channel + effect to update the new active one (metrics)
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C2, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C3, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects2),
+
+ % the last channel goes down
+ {State4, _, Effects3} = apply(meta(4), {down, Pid3, doesnotmatter}, State3),
+ % no more consumers
+ ?assertEqual(0, map_size(State4#rabbit_fifo.consumers)),
+ ?assertEqual(0, length(State4#rabbit_fifo.waiting_consumers)),
+ % there is an effect to unregister the consumer + queue inactive effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C4, Effects3),
+
+ ok.
+
+single_active_returns_messages_on_noconnection_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1],
+ ConsumerIds = [{_, DownPid}] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1 = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+ {State2, _} = enq(4, 1, msg1, State1),
+ % simulate node goes down
+ {State3, _, _} = apply(meta(5), {down, DownPid, noconnection}, State2),
+ %% assert the message was returned and the consumer is waiting with nothing checked out
+ ?assertMatch([_], lqueue:to_list(State3#rabbit_fifo.returns)),
+ ?assertMatch([{_, #consumer{checked_out = Checked}}]
+ when map_size(Checked) == 0,
+ State3#rabbit_fifo.waiting_consumers),
+
+ ok.
+
+single_active_consumer_replaces_consumer_when_down_noconnection_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1, n2, node()],
+ ConsumerIds = [C1 = {_, DownPid}, C2, _C3] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1a = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+
+ %% assert the consumer is up
+ ?assertMatch(#{C1 := #consumer{status = up}},
+ State1a#rabbit_fifo.consumers),
+
+ {State1, _} = enq(10, 1, msg, State1a),
+
+ % simulate node goes down
+ {State2, _, _} = apply(meta(5), {down, DownPid, noconnection}, State1),
+
+ %% assert a new consumer is in place and it is up
+ ?assertMatch([{C2, #consumer{status = up,
+ checked_out = Ch}}]
+ when map_size(Ch) == 1,
+ maps:to_list(State2#rabbit_fifo.consumers)),
+
+ %% the disconnected consumer has been returned to waiting
+ ?assert(lists:any(fun ({C,_}) -> C =:= C1 end,
+ State2#rabbit_fifo.waiting_consumers)),
+ ?assertEqual(2, length(State2#rabbit_fifo.waiting_consumers)),
+
+ % simulate node comes back up
+ {State3, _, _} = apply(#{index => 2}, {nodeup, node(DownPid)}, State2),
+
+ %% the consumer is still active and the same as before
+ ?assertMatch([{C2, #consumer{status = up}}],
+ maps:to_list(State3#rabbit_fifo.consumers)),
+ % the waiting consumers should no longer be marked as suspected down
+ ?assertEqual(2, length(State3#rabbit_fifo.waiting_consumers)),
+ lists:foreach(fun({_, #consumer{status = Status}}) ->
+ ?assert(Status /= suspected_down)
+ end, State3#rabbit_fifo.waiting_consumers),
+ ok.
+
+single_active_consumer_all_disconnected_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1, n2],
+ ConsumerIds = [C1 = {_, C1Pid}, C2 = {_, C2Pid}] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1 = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+
+ %% assert the consumer is up
+ ?assertMatch(#{C1 := #consumer{status = up}}, State1#rabbit_fifo.consumers),
+
+ % simulate node goes down
+ {State2, _, _} = apply(meta(5), {down, C1Pid, noconnection}, State1),
+ %% assert the consumer fails over to the consumer on n2
+ ?assertMatch(#{C2 := #consumer{status = up}}, State2#rabbit_fifo.consumers),
+ {State3, _, _} = apply(meta(6), {down, C2Pid, noconnection}, State2),
+ %% assert there is no active consumer after both nodes are marked as down
+ ?assertMatch([], maps:to_list(State3#rabbit_fifo.consumers)),
+ %% n2 comes back
+ {State4, _, _} = apply(meta(7), {nodeup, node(C2Pid)}, State3),
+ %% ensure n2 is the active consumer as this node has been registered
+ %% as up again
+ ?assertMatch([{{<<"ctag_n2">>, _}, #consumer{status = up,
+ credit = 1}}],
+ maps:to_list(State4#rabbit_fifo.consumers)),
+ ok.
+
+single_active_consumer_state_enter_leader_include_waiting_consumers_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource =>
+ rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ Effects = rabbit_fifo:state_enter(leader, State1),
+ %% 2 effects for each consumer process (channel process), 1 effect for the node,
+ %% 1 effect for file handle reservation
+ ?assertEqual(2 * 3 + 1 + 1, length(Effects)).
+
+single_active_consumer_state_enter_eol_include_waiting_consumers_test(_) ->
+ Resource = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => Resource,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ Effects = rabbit_fifo:state_enter(eol, State1),
+ %% 1 effect for each consumer process (channel process),
+ %% 1 effect for file handle reservation
+ ?assertEqual(4, length(Effects)).
+
+query_consumers_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => false}),
+
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+ Consumers0 = State1#rabbit_fifo.consumers,
+ Consumer = maps:get({<<"ctag2">>, self()}, Consumers0),
+ Consumers1 = maps:put({<<"ctag2">>, self()},
+ Consumer#consumer{status = suspected_down}, Consumers0),
+ State2 = State1#rabbit_fifo{consumers = Consumers1},
+
+ ?assertEqual(3, rabbit_fifo:query_consumer_count(State2)),
+ Consumers2 = rabbit_fifo:query_consumers(State2),
+ ?assertEqual(4, maps:size(Consumers2)),
+ maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ case Tag of
+ <<"ctag2">> ->
+ ?assertNot(Active),
+ ?assertEqual(suspected_down, ActivityStatus);
+ _ ->
+ ?assert(Active),
+ ?assertEqual(up, ActivityStatus)
+ end
+ end, [], Consumers2).
+
+query_consumers_when_single_active_consumer_is_on_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+
+ ?assertEqual(4, rabbit_fifo:query_consumer_count(State1)),
+ Consumers = rabbit_fifo:query_consumers(State1),
+ ?assertEqual(4, maps:size(Consumers)),
+ maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ case Tag of
+ <<"ctag1">> ->
+ ?assert(Active),
+ ?assertEqual(single_active, ActivityStatus);
+ _ ->
+ ?assertNot(Active),
+ ?assertEqual(waiting, ActivityStatus)
+ end
+ end, [], Consumers).
+
+active_flag_updated_when_consumer_suspected_unsuspected_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => false}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} =
+ apply(
+ #{index => 1},
+ rabbit_fifo:make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch},
+ #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ {State2, _, Effects2} = apply(#{index => 3,
+ system_time => 1500}, {down, Pid1, noconnection}, State1),
+ % 1 effect to update the metrics of each consumer (they belong to the same node), 1 more effect to monitor the node
+ ?assertEqual(4 + 1, length(Effects2)),
+
+ {_, _, Effects3} = apply(#{index => 4}, {nodeup, node(self())}, State2),
+ % for each consumer: 1 effect to update the metrics, 1 effect to monitor the consumer PID
+ ?assertEqual(4 + 4, length(Effects3)).
+
+active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_consumer_is_on_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ {State2, _, Effects2} = apply(meta(2), {down, Pid1, noconnection}, State1),
+ % one monitor and one consumer status update (deactivated)
+ ?assertEqual(3, length(Effects2)),
+
+ {_, _, Effects3} = apply(meta(3), {nodeup, node(self())}, State2),
+ % for each consumer: 1 effect to monitor the consumer PID
+ ?assertEqual(5, length(Effects3)).
+
+single_active_cancelled_with_unacked_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ % adding some consumers
+ AddConsumer = fun(C, S0) ->
+ {S, _, _} = apply(
+ meta(1),
+ make_checkout(C,
+ {auto, 1, simple_prefetch},
+ #{}),
+ S0),
+ S
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [C1, C2]),
+
+ %% enqueue 2 messages
+ {State2, _Effects2} = enq(3, 1, msg1, State1),
+ {State3, _Effects3} = enq(4, 2, msg2, State2),
+ %% one message should be checked out to C1
+ %% cancel C1
+ {State4, _, _} = apply(meta(5),
+ make_checkout(C1, cancel, #{}),
+ State3),
+ %% C2 should be the active consumer
+ ?assertMatch(#{C2 := #consumer{status = up,
+ checked_out = #{0 := _}}},
+ State4#rabbit_fifo.consumers),
+ %% C1 should be a cancelled consumer
+ ?assertMatch(#{C1 := #consumer{status = cancelled,
+ lifetime = once,
+ checked_out = #{0 := _}}},
+ State4#rabbit_fifo.consumers),
+ ?assertMatch([], State4#rabbit_fifo.waiting_consumers),
+
+ %% Ack both messages
+ {State5, _Effects5} = settle(C1, 1, 0, State4),
+ %% C1 should now be cancelled
+ {State6, _Effects6} = settle(C2, 2, 0, State5),
+
+ %% C2 should remain
+ ?assertMatch(#{C2 := #consumer{status = up}},
+ State6#rabbit_fifo.consumers),
+ %% C1 should be gone
+ ?assertNotMatch(#{C1 := _},
+ State6#rabbit_fifo.consumers),
+ ?assertMatch([], State6#rabbit_fifo.waiting_consumers),
+ ok.
+
+single_active_with_credited_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ % adding some consumers
+ AddConsumer = fun(C, S0) ->
+ {S, _, _} = apply(
+ meta(1),
+ make_checkout(C,
+ {auto, 0, credited},
+ #{}),
+ S0),
+ S
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [C1, C2]),
+
+ %% add some credit
+ C1Cred = rabbit_fifo:make_credit(C1, 5, 0, false),
+ {State2, _, _Effects2} = apply(meta(3), C1Cred, State1),
+ C2Cred = rabbit_fifo:make_credit(C2, 4, 0, false),
+ {State3, _} = apply(meta(4), C2Cred, State2),
+ %% both consumers should have credit
+ ?assertMatch(#{C1 := #consumer{credit = 5}},
+ State3#rabbit_fifo.consumers),
+ ?assertMatch([{C2, #consumer{credit = 4}}],
+ State3#rabbit_fifo.waiting_consumers),
+ ok.
+
+
+register_enqueuer_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ max_length => 2,
+ overflow_strategy => reject_publish}),
+ %% simply registering should be ok when we're below limit
+ Pid1 = test_util:fake_pid(node()),
+ {State1, ok, [_]} = apply(meta(1), make_register_enqueuer(Pid1), State0),
+
+ {State2, ok, _} = apply(meta(2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1),
+ %% registering another enqueuer should be ok
+ Pid2 = test_util:fake_pid(node()),
+ {State3, ok, [_]} = apply(meta(3), make_register_enqueuer(Pid2), State2),
+
+ {State4, ok, _} = apply(meta(4), rabbit_fifo:make_enqueue(Pid1, 2, two), State3),
+ {State5, ok, Efx} = apply(meta(5), rabbit_fifo:make_enqueue(Pid1, 3, three), State4),
+ % ct:pal("Efx ~p", [Efx]),
+ %% validate all registered enqueuers are notified of overflow state
+ ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx),
+ ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid2, Efx),
+
+ %% this time, registry should return reject_publish
+ {State6, reject_publish, [_]} = apply(meta(6), make_register_enqueuer(
+ test_util:fake_pid(node())), State5),
+ ?assertMatch(#{num_enqueuers := 3}, rabbit_fifo:overview(State6)),
+
+
+ %% remove two messages; this should make the queue fall below the 0.8 limit
+ {State7, {dequeue, _, _}, _Efx7} =
+ apply(meta(7),
+ rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State6),
+ ct:pal("Efx7 ~p", [_Efx7]),
+ {State8, {dequeue, _, _}, Efx8} =
+ apply(meta(8),
+ rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State7),
+ ct:pal("Efx8 ~p", [Efx8]),
+ %% validate all registered enqueuers are notified that the queue can accept publishes again
+ ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid1, Efx8),
+ ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid2, Efx8),
+ {_State9, {dequeue, _, _}, Efx9} =
+ apply(meta(9),
+ rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State8),
+ ?ASSERT_NO_EFF({send_msg, P, go, [ra_event]}, P == Pid1, Efx9),
+ ?ASSERT_NO_EFF({send_msg, P, go, [ra_event]}, P == Pid2, Efx9),
+ ok.
+
+reject_publish_purge_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ max_length => 2,
+ overflow_strategy => reject_publish}),
+ %% simply registering should be ok when we're below limit
+ Pid1 = test_util:fake_pid(node()),
+ {State1, ok, [_]} = apply(meta(1), make_register_enqueuer(Pid1), State0),
+ {State2, ok, _} = apply(meta(2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1),
+ {State3, ok, _} = apply(meta(3), rabbit_fifo:make_enqueue(Pid1, 2, two), State2),
+ {State4, ok, Efx} = apply(meta(4), rabbit_fifo:make_enqueue(Pid1, 3, three), State3),
+ % ct:pal("Efx ~p", [Efx]),
+ ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx),
+ {_State5, {purge, 3}, Efx1} = apply(meta(5), rabbit_fifo:make_purge(), State4),
+ ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid1, Efx1),
+ ok.
+
+reject_publish_applied_after_limit_test(_) ->
+ InitConf = #{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8))
+ },
+ State0 = init(InitConf),
+ %% simply registering should be ok when we're below limit
+ Pid1 = test_util:fake_pid(node()),
+ {State1, ok, [_]} = apply(meta(1), make_register_enqueuer(Pid1), State0),
+ {State2, ok, _} = apply(meta(2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1),
+ {State3, ok, _} = apply(meta(3), rabbit_fifo:make_enqueue(Pid1, 2, two), State2),
+ {State4, ok, Efx} = apply(meta(4), rabbit_fifo:make_enqueue(Pid1, 3, three), State3),
+ % ct:pal("Efx ~p", [Efx]),
+ ?ASSERT_NO_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx),
+ %% apply new config
+ Conf = #{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ max_length => 2,
+ overflow_strategy => reject_publish
+ },
+ {State5, ok, Efx1} = apply(meta(5), rabbit_fifo:make_update_config(Conf), State4),
+ ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx1),
+ Pid2 = test_util:fake_pid(node()),
+ {_State6, reject_publish, _} = apply(meta(1), make_register_enqueuer(Pid2), State5),
+ ok.
+
+purge_nodes_test(_) ->
+ Node = purged@node,
+ ThisNode = node(),
+ EnqPid = test_util:fake_pid(Node),
+ EnqPid2 = test_util:fake_pid(node()),
+ ConPid = test_util:fake_pid(Node),
+ Cid = {<<"tag">>, ConPid},
+ % WaitingPid = test_util:fake_pid(Node),
+
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ single_active_consumer_on => false}),
+ {State1, _, _} = apply(meta(1),
+ rabbit_fifo:make_enqueue(EnqPid, 1, msg1),
+ State0),
+ {State2, _, _} = apply(meta(2),
+ rabbit_fifo:make_enqueue(EnqPid2, 1, msg2),
+ State1),
+ {State3, _} = check(Cid, 3, 1000, State2),
+ {State4, _, _} = apply(meta(4),
+ {down, EnqPid, noconnection},
+ State3),
+ ?assertMatch(
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{}, _Metrics,
+ [ThisNode, Node]
+ ]}] , rabbit_fifo:tick(1, State4)),
+ %% assert there are both enqueuers and consumers
+ {State, _, _} = apply(meta(5),
+ rabbit_fifo:make_purge_nodes([Node]),
+ State4),
+
+ %% assert the enqueuer and consumer on the purged node have been removed
+ ?assertMatch(#rabbit_fifo{enqueuers = Enqs} when map_size(Enqs) == 1,
+ State),
+
+ ?assertMatch(#rabbit_fifo{consumers = Cons} when map_size(Cons) == 0,
+ State),
+ ?assertMatch(
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{}, _Metrics,
+ [ThisNode]
+ ]}] , rabbit_fifo:tick(1, State)),
+ ok.
+
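+%% test helpers
+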
+meta(Idx) ->
+ meta(Idx, 0).
+
+meta(Idx, Timestamp) ->
+ #{index => Idx,
+ term => 1,
+ system_time => Timestamp,
+ from => {make_ref(), self()}}.
+
+enq(Idx, MsgSeq, Msg, State) ->
+ strip_reply(
+ apply(meta(Idx), rabbit_fifo:make_enqueue(self(), MsgSeq, Msg), State)).
+
+deq(Idx, Cid, Settlement, State0) ->
+ {State, {dequeue, {MsgId, Msg}, _}, _} =
+ apply(meta(Idx),
+ rabbit_fifo:make_checkout(Cid, {dequeue, Settlement}, #{}),
+ State0),
+ {State, {MsgId, Msg}}.
+
+check_n(Cid, Idx, N, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo:make_checkout(Cid, {auto, N, simple_prefetch}, #{}),
+ State)).
+
+check(Cid, Idx, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo:make_checkout(Cid, {once, 1, simple_prefetch}, #{}),
+ State)).
+
+check_auto(Cid, Idx, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo:make_checkout(Cid, {auto, 1, simple_prefetch}, #{}),
+ State)).
+
+check(Cid, Idx, Num, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo:make_checkout(Cid, {auto, Num, simple_prefetch}, #{}),
+ State)).
+
+settle(Cid, Idx, MsgId, State) ->
+ strip_reply(apply(meta(Idx), rabbit_fifo:make_settle(Cid, [MsgId]), State)).
+
+credit(Cid, Idx, Credit, DelCnt, Drain, State) ->
+ strip_reply(apply(meta(Idx), rabbit_fifo:make_credit(Cid, Credit, DelCnt, Drain),
+ State)).
+
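+%% drops the reply element from an apply/3 result, returning {State, Effects}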
+strip_reply({State, _, Effects}) ->
+ {State, Effects}.
+
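+%% applies a list of {RaftIndex, Command} entries to the state, accumulating any effects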
+run_log(InitState, Entries) ->
+ lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) ->
+ case apply(meta(Idx), E, Acc0) of
+ {Acc, _, Efx} when is_list(Efx) ->
+ {Acc, Efx0 ++ Efx};
+ {Acc, _, Efx} ->
+ {Acc, Efx0 ++ [Efx]};
+ {Acc, _} ->
+ {Acc, Efx0}
+ end
+ end, {InitState, []}, Entries).
+
+
+%% AUX Tests
+
+aux_test(_) ->
+ _ = ra_machine_ets:start_link(),
+ Aux0 = init_aux(aux_test),
+ MacState = init(#{name => aux_test,
+ queue_resource =>
+ rabbit_misc:r(<<"/">>, queue, <<"test">>)}),
+ ok = meck:new(ra_log, []),
+ Log = mock_log,
+ meck:expect(ra_log, last_index_term, fun (_) -> {0, 0} end),
+ {no_reply, Aux, mock_log} = handle_aux(leader, cast, active, Aux0,
+ Log, MacState),
+ {no_reply, _Aux, mock_log} = handle_aux(leader, cast, tick, Aux,
+ Log, MacState),
+ [X] = ets:lookup(rabbit_fifo_usage, aux_test),
+ meck:unload(),
+ ?assert(X > 0.0),
+ ok.
+
+
+%% machine version conversion test
+
+machine_version_test(_) ->
+ V0 = rabbit_fifo_v0,
+ S0 = V0:init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>)}),
+ Idx = 1,
+ {#rabbit_fifo{}, ok, []} = apply(meta(Idx), {machine_version, 0, 1}, S0),
+
+ Cid = {atom_to_binary(?FUNCTION_NAME, utf8), self()},
+ Entries = [
+ {1, rabbit_fifo_v0:make_enqueue(self(), 1, banana)},
+ {2, rabbit_fifo_v0:make_enqueue(self(), 2, apple)},
+ {3, rabbit_fifo_v0:make_checkout(Cid, {auto, 1, unsettled}, #{})}
+ ],
+ {S1, _Effects} = rabbit_fifo_v0_SUITE:run_log(S0, Entries),
+ Self = self(),
+ {#rabbit_fifo{enqueuers = #{Self := #enqueuer{}},
+ consumers = #{Cid := #consumer{priority = 0}},
+ service_queue = S,
+ messages = Msgs}, ok, []} = apply(meta(Idx),
+ {machine_version, 0, 1}, S1),
+ %% validate message conversion to lqueue
+ ?assertEqual(1, lqueue:len(Msgs)),
+ ?assert(priority_queue:is_queue(S)),
+ ok.
+
+queue_ttl_test(_) ->
+ QName = rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ Conf = #{name => ?FUNCTION_NAME,
+ queue_resource => QName,
+ created => 1000,
+ expires => 1000},
+ S0 = rabbit_fifo:init(Conf),
+ Now = 1500,
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now, S0),
+ %% this should delete the queue
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 1000, S0),
+ %% adding a consumer should not ever trigger deletion
+ Cid = {<<"cid1">>, self()},
+ {S1, _} = check_auto(Cid, 1, S0),
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now, S1),
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S1),
+ %% cancelling the consumer should then
+ {S2, _, _} = apply(meta(2, Now),
+ rabbit_fifo:make_checkout(Cid, cancel, #{}), S1),
+ %% last_active should have been reset when consumer was cancelled
+ %% last_active = 2500
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S2),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 2500, S2),
+
+ %% Same for downs
+ {S2D, _, _} = apply(meta(2, Now),
+ {down, self(), noconnection}, S1),
+ %% last_active should have been reset when consumer was cancelled
+ %% last_active = 2500
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S2D),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 2500, S2D),
+
+ %% dequeue should set last applied
+ {S1Deq, {dequeue, empty}} =
+ apply(meta(2, Now),
+ rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ S0),
+
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S1Deq),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 2500, S1Deq),
+ %% Enqueue message,
+ {E1, _, _} = apply(meta(2, Now),
+ rabbit_fifo:make_enqueue(self(), 1, msg1), S0),
+ Deq = {<<"deq1">>, self()},
+ {E2, {dequeue, {MsgId, _}, _}, _} =
+ apply(meta(3, Now),
+ rabbit_fifo:make_checkout(Deq, {dequeue, unsettled}, #{}),
+ E1),
+ {E3, _, _} = apply(meta(3, Now + 1000),
+ rabbit_fifo:make_settle(Deq, [MsgId]), E2),
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1500, E3),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 3000, E3),
+
+ ok.
+
+queue_ttl_with_single_active_consumer_test(_) ->
+ QName = rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ Conf = #{name => ?FUNCTION_NAME,
+ queue_resource => QName,
+ created => 1000,
+ expires => 1000,
+ single_active_consumer_on => true},
+ S0 = rabbit_fifo:init(Conf),
+ Now = 1500,
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now, S0),
+ %% this should delete the queue
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 1000, S0),
+ %% adding a consumer should not ever trigger deletion
+ Cid = {<<"cid1">>, self()},
+ {S1, _} = check_auto(Cid, 1, S0),
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now, S1),
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S1),
+ %% cancelling the consumer should then
+ {S2, _, _} = apply(meta(2, Now),
+ rabbit_fifo:make_checkout(Cid, cancel, #{}), S1),
+ %% last_active should have been reset when consumer was cancelled
+ %% last_active = 2500
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S2),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 2500, S2),
+
+ %% Same for downs
+ {S2D, _, _} = apply(meta(2, Now),
+ {down, self(), noconnection}, S1),
+ %% last_active should have been reset when consumer was cancelled
+ %% last_active = 2500
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S2D),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 2500, S2D),
+
+ ok.
+
+query_peek_test(_) ->
+ State0 = test_init(test),
+ ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(1, State0)),
+ {State1, _} = enq(1, 1, first, State0),
+ {State2, _} = enq(2, 2, second, State1),
+ ?assertMatch({ok, {_, {_, first}}}, rabbit_fifo:query_peek(1, State1)),
+ ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(2, State1)),
+ ?assertMatch({ok, {_, {_, first}}}, rabbit_fifo:query_peek(1, State2)),
+ ?assertMatch({ok, {_, {_, second}}}, rabbit_fifo:query_peek(2, State2)),
+ ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(3, State2)),
+ ok.
+
+checkout_priority_test(_) ->
+ Cid = {<<"checkout_priority_test">>, self()},
+ Pid = spawn(fun () -> ok end),
+ Cid2 = {<<"checkout_priority_test2">>, Pid},
+ Args = [{<<"x-priority">>, long, 1}],
+ {S1, _, _} =
+ apply(meta(3),
+ rabbit_fifo:make_checkout(Cid, {once, 2, simple_prefetch},
+ #{args => Args}),
+ test_init(test)),
+ {S2, _, _} =
+ apply(meta(3),
+ rabbit_fifo:make_checkout(Cid2, {once, 2, simple_prefetch},
+ #{args => []}),
+ S1),
+ {S3, E3} = enq(1, 1, first, S2),
+ ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == self(), E3),
+ {S4, E4} = enq(2, 2, second, S3),
+ ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == self(), E4),
+ {_S5, E5} = enq(3, 3, third, S4),
+ ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == Pid, E5),
+ ok.
+
+%% Utility
+
+init(Conf) -> rabbit_fifo:init(Conf).
+make_register_enqueuer(Pid) -> rabbit_fifo:make_register_enqueuer(Pid).
+apply(Meta, Entry, State) -> rabbit_fifo:apply(Meta, Entry, State).
+init_aux(Conf) -> rabbit_fifo:init_aux(Conf).
+handle_aux(S, T, C, A, L, M) -> rabbit_fifo:handle_aux(S, T, C, A, L, M).
+make_checkout(C, S, M) -> rabbit_fifo:make_checkout(C, S, M).
diff --git a/deps/rabbit/test/rabbit_fifo_int_SUITE.erl b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl
new file mode 100644
index 0000000000..37f5436dbf
--- /dev/null
+++ b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl
@@ -0,0 +1,661 @@
+-module(rabbit_fifo_int_SUITE).
+
+%% rabbit_fifo and rabbit_fifo_client integration suite
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(RA_EVENT_TIMEOUT, 5000).
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+all_tests() ->
+ [
+ basics,
+ return,
+ rabbit_fifo_returns_correlation,
+ resends_lost_command,
+ returns_after_down,
+ resends_after_lost_applied,
+ handles_reject_notification,
+ two_quick_enqueues,
+ detects_lost_delivery,
+ dequeue,
+ discard,
+ cancel_checkout,
+ credit,
+ untracked_enqueue,
+ flow,
+ test_queries,
+ duplicate_delivery,
+ usage
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_group(_, Config) ->
+ PrivDir = ?config(priv_dir, Config),
+ _ = application:load(ra),
+ ok = application:set_env(ra, data_dir, PrivDir),
+ application:ensure_all_started(ra),
+ application:ensure_all_started(lg),
+ Config.
+
+end_per_group(_, Config) ->
+ _ = application:stop(ra),
+ Config.
+
+init_per_testcase(TestCase, Config) ->
+ meck:new(rabbit_quorum_queue, [passthrough]),
+ meck:expect(rabbit_quorum_queue, handle_tick, fun (_, _, _) -> ok end),
+ meck:expect(rabbit_quorum_queue, file_handle_leader_reservation, fun (_) -> ok end),
+ meck:expect(rabbit_quorum_queue, file_handle_other_reservation, fun () -> ok end),
+ meck:expect(rabbit_quorum_queue, cancel_consumer_handler,
+ fun (_, _) -> ok end),
+ ra_server_sup_sup:remove_all(),
+ ServerName2 = list_to_atom(atom_to_list(TestCase) ++ "2"),
+ ServerName3 = list_to_atom(atom_to_list(TestCase) ++ "3"),
+ ClusterName = rabbit_misc:r("/", queue, atom_to_binary(TestCase, utf8)),
+ [
+ {cluster_name, ClusterName},
+ {uid, atom_to_binary(TestCase, utf8)},
+ {node_id, {TestCase, node()}},
+ {uid2, atom_to_binary(ServerName2, utf8)},
+ {node_id2, {ServerName2, node()}},
+ {uid3, atom_to_binary(ServerName3, utf8)},
+ {node_id3, {ServerName3, node()}}
+ | Config].
+
+end_per_testcase(_, Config) ->
+ meck:unload(),
+ Config.
+
+basics(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ UId = ?config(uid, Config),
+ CustomerTag = UId,
+ ok = start_cluster(ClusterName, [ServerId]),
+ FState0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, FState1} = rabbit_fifo_client:checkout(CustomerTag, 1, simple_prefetch,
+ #{}, FState0),
+
+ ra_log_wal:force_roll_over(ra_log_wal),
+ % create a segment; the segment will trigger a snapshot
+ timer:sleep(1000),
+
+ {ok, FState2} = rabbit_fifo_client:enqueue(one, FState1),
+ % process ra events
+ FState3 = process_ra_event(FState2, ?RA_EVENT_TIMEOUT),
+
+ FState5 = receive
+ {ra_event, From, Evt} ->
+ case rabbit_fifo_client:handle_ra_event(From, Evt, FState3) of
+ {ok, FState4,
+ [{deliver, C, true,
+ [{_Qname, _QRef, MsgId, _SomBool, _Msg}]}]} ->
+ {S, _A} = rabbit_fifo_client:settle(C, [MsgId], FState4),
+ S
+ end
+ after 5000 ->
+ exit(await_msg_timeout)
+ end,
+
+ % process settle applied notification
+ FState5b = process_ra_event(FState5, ?RA_EVENT_TIMEOUT),
+ _ = ra:stop_server(ServerId),
+ _ = ra:restart_server(ServerId),
+
+ %% wait for leader change to notice server is up again
+ receive
+ {ra_event, _, {machine, leader_change}} -> ok
+ after 5000 ->
+ exit(leader_change_timeout)
+ end,
+
+ {ok, FState6} = rabbit_fifo_client:enqueue(two, FState5b),
+ % process applied event
+ FState6b = process_ra_event(FState6, ?RA_EVENT_TIMEOUT),
+
+ receive
+ {ra_event, Frm, E} ->
+ case rabbit_fifo_client:handle_ra_event(Frm, E, FState6b) of
+ {ok, FState7, [{deliver, Ctag, true,
+ [{_, _, Mid, _, two}]}]} ->
+ {_, _} = rabbit_fifo_client:return(Ctag, [Mid], FState7),
+ ok
+ end
+ after 2000 ->
+ exit(await_msg_timeout)
+ end,
+ ra:stop_server(ServerId),
+ ok.
+
+return(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ F00 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F0} = rabbit_fifo_client:enqueue(1, msg1, F00),
+ {ok, F1} = rabbit_fifo_client:enqueue(2, msg2, F0),
+ {_, _, F2} = process_ra_events(receive_ra_events(2, 0), F1),
+ {ok, _, {_, _, MsgId, _, _}, F} = rabbit_fifo_client:dequeue(<<"tag">>, unsettled, F2),
+ _F2 = rabbit_fifo_client:return(<<"tag">>, [MsgId], F),
+
+ ra:stop_server(ServerId),
+ ok.
+
+rabbit_fifo_returns_correlation(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:enqueue(corr1, msg1, F0),
+ receive
+ {ra_event, Frm, E} ->
+ case rabbit_fifo_client:handle_ra_event(Frm, E, F1) of
+ {ok, _F2, [{settled, _, _}]} ->
+ ok;
+ Del ->
+ exit({unexpected, Del})
+ end
+ after 2000 ->
+ exit(await_msg_timeout)
+ end,
+ ra:stop_server(ServerId),
+ ok.
+
+duplicate_delivery(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(corr1, msg1, F1),
+ Fun = fun Loop(S0) ->
+ receive
+ {ra_event, Frm, E} = Evt ->
+ case rabbit_fifo_client:handle_ra_event(Frm, E, S0) of
+ {ok, S1, [{settled, _, _}]} ->
+ Loop(S1);
+ {ok, S1, _} ->
+ %% repeat event delivery
+ self() ! Evt,
+ %% check that the next received delivery doesn't
+ %% repeat or crash
+ receive
+ {ra_event, F, E1} ->
+ case rabbit_fifo_client:handle_ra_event(
+ F, E1, S1) of
+ {ok, S2, _} ->
+ S2
+ end
+ end
+ end
+ after 2000 ->
+ exit(await_msg_timeout)
+ end
+ end,
+ Fun(F2),
+ ra:stop_server(ServerId),
+ ok.
+
+usage(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(corr1, msg1, F1),
+ {ok, F3} = rabbit_fifo_client:enqueue(corr2, msg2, F2),
+ {_, _, _} = process_ra_events(receive_ra_events(2, 2), F3),
+ % force tick and usage stats emission
+ ServerId ! tick_timeout,
+ timer:sleep(50),
+ Use = rabbit_fifo:usage(element(1, ServerId)),
+ ra:stop_server(ServerId),
+ ?assert(Use > 0.0),
+ ok.
+
+resends_lost_command(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ ok = meck:new(ra, [passthrough]),
+
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:enqueue(msg1, F0),
+ % lose the enqueue
+ meck:expect(ra, pipeline_command, fun (_, _, _) -> ok end),
+ {ok, F2} = rabbit_fifo_client:enqueue(msg2, F1),
+ meck:unload(ra),
+ {ok, F3} = rabbit_fifo_client:enqueue(msg3, F2),
+ {_, _, F4} = process_ra_events(receive_ra_events(2, 0), F3),
+ {ok, _, {_, _, _, _, msg1}, F5} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F4),
+ {ok, _, {_, _, _, _, msg2}, F6} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F5),
+ {ok, _, {_, _, _, _, msg3}, _F7} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F6),
+ ra:stop_server(ServerId),
+ ok.
+
+two_quick_enqueues(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ F1 = element(2, rabbit_fifo_client:enqueue(msg1, F0)),
+ {ok, F2} = rabbit_fifo_client:enqueue(msg2, F1),
+ _ = process_ra_events(receive_ra_events(2, 0), F2),
+ ra:stop_server(ServerId),
+ ok.
+
+detects_lost_delivery(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ F000 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F00} = rabbit_fifo_client:enqueue(msg1, F000),
+ {_, _, F0} = process_ra_events(receive_ra_events(1, 0), F00),
+ {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(msg2, F1),
+ {ok, F3} = rabbit_fifo_client:enqueue(msg3, F2),
+ % lose first delivery
+ receive
+ {ra_event, _, {machine, {delivery, _, [{_, {_, msg1}}]}}} ->
+ ok
+ after 5000 ->
+ exit(await_delivery_timeout)
+ end,
+
+ % assert three deliveries were received
+ {[_, _, _], _, _} = process_ra_events(receive_ra_events(2, 2), F3),
+ ra:stop_server(ServerId),
+ ok.
+
+returns_after_down(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:enqueue(msg1, F0),
+ {_, _, F2} = process_ra_events(receive_ra_events(1, 0), F1),
+ % start a consumer in a separate process
+ % that exits after checkout
+ Self = self(),
+ _Pid = spawn(fun () ->
+ F = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, _} = rabbit_fifo_client:checkout(<<"tag">>, 10,
+ simple_prefetch,
+ #{}, F),
+ Self ! checkout_done
+ end),
+ receive checkout_done -> ok after 1000 -> exit(checkout_done_timeout) end,
+ timer:sleep(1000),
+ % message should be available for dequeue
+ {ok, _, {_, _, _, _, msg1}, _} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F2),
+ ra:stop_server(ServerId),
+ ok.
+
+resends_after_lost_applied(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:enqueue(msg1, F0),
+ {_, _, F2} = process_ra_events(receive_ra_events(1, 0), F1),
+ {ok, F3} = rabbit_fifo_client:enqueue(msg2, F2),
+ % lose an applied event
+ receive
+ {ra_event, _, {applied, _}} ->
+ ok
+ after 500 ->
+ exit(await_ra_event_timeout)
+ end,
+ % send another message
+ {ok, F4} = rabbit_fifo_client:enqueue(msg3, F3),
+ {_, _, F5} = process_ra_events(receive_ra_events(1, 0), F4),
+ {ok, _, {_, _, _, _, msg1}, F6} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F5),
+ {ok, _, {_, _, _, _, msg2}, F7} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F6),
+ {ok, _, {_, _, _, _, msg3}, _F8} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F7),
+ ra:stop_server(ServerId),
+ ok.
+
+handles_reject_notification(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId1 = ?config(node_id, Config),
+ ServerId2 = ?config(node_id2, Config),
+ UId1 = ?config(uid, Config),
+ CId = {UId1, self()},
+
+ ok = start_cluster(ClusterName, [ServerId1, ServerId2]),
+ _ = ra:process_command(ServerId1,
+ rabbit_fifo:make_checkout(
+ CId,
+ {auto, 10, simple_prefetch},
+ #{})),
+ % reverse order - should try the first node in the list first
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId2, ServerId1]),
+ {ok, F1} = rabbit_fifo_client:enqueue(one, F0),
+
+ timer:sleep(500),
+
+ % the applied notification
+ _F2 = process_ra_events(receive_ra_events(1, 0), F1),
+ ra:stop_server(ServerId1),
+ ra:stop_server(ServerId2),
+ ok.
+
+discard(Config) ->
+ PrivDir = ?config(priv_dir, Config),
+ ServerId = ?config(node_id, Config),
+ UId = ?config(uid, Config),
+ ClusterName = ?config(cluster_name, Config),
+ Conf = #{cluster_name => ClusterName#resource.name,
+ id => ServerId,
+ uid => UId,
+ log_init_args => #{data_dir => PrivDir, uid => UId},
+ initial_member => [],
+ machine => {module, rabbit_fifo,
+ #{queue_resource => discard,
+ dead_letter_handler =>
+ {?MODULE, dead_letter_handler, [self()]}}}},
+ _ = ra:start_server(Conf),
+ ok = ra:trigger_election(ServerId),
+ _ = ra:members(ServerId),
+
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10,
+ simple_prefetch, #{}, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(msg1, F1),
+ F3 = discard_next_delivery(F2, 5000),
+ {empty, _F4} = rabbit_fifo_client:dequeue(<<"tag1">>, settled, F3),
+ receive
+ {dead_letter, Letters} ->
+ [{_, msg1}] = Letters,
+ ok
+ after 500 ->
+ flush(),
+ exit(dead_letter_timeout)
+ end,
+ ra:stop_server(ServerId),
+ ok.
+
+cancel_checkout(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId], 4),
+ {ok, F1} = rabbit_fifo_client:enqueue(m1, F0),
+ {ok, F2} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F1),
+ {_, _, F3} = process_ra_events(receive_ra_events(1, 1), F2, [], [], fun (_, S) -> S end),
+ {ok, F4} = rabbit_fifo_client:cancel_checkout(<<"tag">>, F3),
+ {F5, _} = rabbit_fifo_client:return(<<"tag">>, [0], F4),
+ {ok, _, {_, _, _, _, m1}, F5} = rabbit_fifo_client:dequeue(<<"d1">>, settled, F5),
+ ok.
+
+credit(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId], 4),
+ {ok, F1} = rabbit_fifo_client:enqueue(m1, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(m2, F1),
+ {_, _, F3} = process_ra_events(receive_ra_events(2, 0), F2),
+ %% checkout with 0 prefetch
+ {ok, F4} = rabbit_fifo_client:checkout(<<"tag">>, 0, credited, #{}, F3),
+ %% assert no deliveries
+ {_, _, F5} = process_ra_events(receive_ra_events(), F4, [], [],
+ fun
+ (D, _) -> error({unexpected_delivery, D})
+ end),
+ %% provide some credit
+ {F6, []} = rabbit_fifo_client:credit(<<"tag">>, 1, false, F5),
+ {[{_, _, _, _, m1}], [{send_credit_reply, _}], F7} =
+ process_ra_events(receive_ra_events(1, 1), F6),
+
+ %% credit and drain
+ {F8, []} = rabbit_fifo_client:credit(<<"tag">>, 4, true, F7),
+ {[{_, _, _, _, m2}], [{send_credit_reply, _}, {send_drained, _}], F9} =
+ process_ra_events(receive_ra_events(1, 1), F8),
+ flush(),
+
+ %% enqueue another message - at this point the consumer credit should be
+ %% all used up due to the drain
+ {ok, F10} = rabbit_fifo_client:enqueue(m3, F9),
+ %% assert no deliveries
+ {_, _, F11} = process_ra_events(receive_ra_events(), F10, [], [],
+ fun
+ (D, _) -> error({unexpected_delivery, D})
+ end),
+ %% credit again and receive the last message
+ {F12, []} = rabbit_fifo_client:credit(<<"tag">>, 10, false, F11),
+ {[{_, _, _, _, m3}], _, _} = process_ra_events(receive_ra_events(1, 1), F12),
+ ok.
+
+untracked_enqueue(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ ok = rabbit_fifo_client:untracked_enqueue([ServerId], msg1),
+ timer:sleep(100),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, _, {_, _, _, _, msg1}, _F5} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F0),
+ ra:stop_server(ServerId),
+ ok.
+
+
+flow(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId], 3),
+ {ok, F1} = rabbit_fifo_client:enqueue(m1, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(m2, F1),
+ {ok, F3} = rabbit_fifo_client:enqueue(m3, F2),
+ {slow, F4} = rabbit_fifo_client:enqueue(m4, F3),
+ {_, _, F5} = process_ra_events(receive_ra_events(4, 0), F4),
+ {ok, _} = rabbit_fifo_client:enqueue(m5, F5),
+ ra:stop_server(ServerId),
+ ok.
+
+test_queries(Config) ->
+ % ok = logger:set_primary_config(level, all),
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ Self = self(),
+ P = spawn(fun () ->
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId], 4),
+ {ok, F1} = rabbit_fifo_client:enqueue(m1, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(m2, F1),
+ process_ra_events(receive_ra_events(2, 0), F2),
+ Self ! ready,
+ receive stop -> ok end
+ end),
+ receive
+ ready -> ok
+ after 5000 ->
+ exit(ready_timeout)
+ end,
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId], 4),
+ {ok, _} = rabbit_fifo_client:checkout(<<"tag">>, 1, simple_prefetch, #{}, F0),
+ {ok, {_, Ready}, _} = ra:local_query(ServerId,
+ fun rabbit_fifo:query_messages_ready/1),
+ ?assertEqual(1, Ready),
+ {ok, {_, Checked}, _} = ra:local_query(ServerId,
+ fun rabbit_fifo:query_messages_checked_out/1),
+ ?assertEqual(1, Checked),
+ {ok, {_, Processes}, _} = ra:local_query(ServerId,
+ fun rabbit_fifo:query_processes/1),
+ ?assertEqual(2, length(Processes)),
+ P ! stop,
+ ra:stop_server(ServerId),
+ ok.
+
+dead_letter_handler(Pid, Msgs) ->
+ Pid ! {dead_letter, Msgs}.
+
+dequeue(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ UId = ?config(uid, Config),
+ Tag = UId,
+ ok = start_cluster(ClusterName, [ServerId]),
+ F1 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {empty, F1b} = rabbit_fifo_client:dequeue(Tag, settled, F1),
+ {ok, F2_} = rabbit_fifo_client:enqueue(msg1, F1b),
+ {_, _, F2} = process_ra_events(receive_ra_events(1, 0), F2_),
+
+ % {ok, {{0, {_, msg1}}, _}, F3} = rabbit_fifo_client:dequeue(Tag, settled, F2),
+ {ok, _, {_, _, 0, _, msg1}, F3} = rabbit_fifo_client:dequeue(Tag, settled, F2),
+ {ok, F4_} = rabbit_fifo_client:enqueue(msg2, F3),
+ {_, _, F4} = process_ra_events(receive_ra_events(1, 0), F4_),
+ {ok, _, {_, _, MsgId, _, msg2}, F5} = rabbit_fifo_client:dequeue(Tag, unsettled, F4),
+ {_F6, _A} = rabbit_fifo_client:settle(Tag, [MsgId], F5),
+ ra:stop_server(ServerId),
+ ok.
+
+conf(ClusterName, UId, ServerId, _, Peers) ->
+ #{cluster_name => ClusterName,
+ id => ServerId,
+ uid => UId,
+ log_init_args => #{uid => UId},
+ initial_members => Peers,
+ machine => {module, rabbit_fifo, #{}}}.
+
+process_ra_event(State, Wait) ->
+ receive
+ {ra_event, From, Evt} ->
+ {ok, S, _Actions} =
+ rabbit_fifo_client:handle_ra_event(From, Evt, State),
+ S
+ after Wait ->
+ exit(ra_event_timeout)
+ end.
+
+receive_ra_events(Applied, Deliveries) ->
+ receive_ra_events(Applied, Deliveries, []).
+
+receive_ra_events(Applied, Deliveries, Acc) when Applied =< 0, Deliveries =< 0->
+    %% if more events than expected arrive, the testcase should check for them explicitly
+ lists:reverse(Acc);
+receive_ra_events(Applied, Deliveries, Acc) ->
+ receive
+ {ra_event, _, {applied, Seqs}} = Evt ->
+ receive_ra_events(Applied - length(Seqs), Deliveries, [Evt | Acc]);
+ {ra_event, _, {machine, {delivery, _, MsgIds}}} = Evt ->
+ receive_ra_events(Applied, Deliveries - length(MsgIds), [Evt | Acc]);
+ {ra_event, _, _} = Evt ->
+ receive_ra_events(Applied, Deliveries, [Evt | Acc])
+ after 5000 ->
+ exit({missing_events, Applied, Deliveries, Acc})
+ end.
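+
+%% Illustrative note (the values are made up): a call such as
+%% receive_ra_events(2, 1) blocks until two applied sequence numbers and one
+%% delivered message id have been seen, then returns the events in arrival
+%% order so they can be handed to process_ra_events/2.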
+
+%% Flush the mailbox so we can later check that no deliveries have been received
+receive_ra_events() ->
+ receive_ra_events([]).
+
+receive_ra_events(Acc) ->
+ receive
+ {ra_event, _, _} = Evt ->
+ receive_ra_events([Evt | Acc])
+ after 500 ->
+ Acc
+ end.
+
+process_ra_events(Events, State) ->
+ DeliveryFun = fun ({deliver, _, Tag, Msgs}, S) ->
+ MsgIds = [element(1, M) || M <- Msgs],
+ {S0, _} = rabbit_fifo_client:settle(Tag, MsgIds, S),
+ S0
+ end,
+ process_ra_events(Events, State, [], [], DeliveryFun).
+
+process_ra_events([], State0, Acc, Actions0, _DeliveryFun) ->
+ {Acc, Actions0, State0};
+process_ra_events([{ra_event, From, Evt} | Events], State0, Acc, Actions0, DeliveryFun) ->
+ case rabbit_fifo_client:handle_ra_event(From, Evt, State0) of
+ {ok, State1, Actions1} ->
+ {Msgs, Actions, State} =
+ lists:foldl(
+ fun ({deliver, _, _, Msgs} = Del, {M, A, S}) ->
+ {M ++ Msgs, A, DeliveryFun(Del, S)};
+ (Ac, {M, A, S}) ->
+ {M, A ++ [Ac], S}
+ end, {Acc, [], State1}, Actions1),
+ process_ra_events(Events, State, Msgs, Actions0 ++ Actions, DeliveryFun);
+ eol ->
+ eol
+ end.
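+
+%% Sketch of the default behaviour: each {deliver, _, Tag, Msgs} action is
+%% folded through DeliveryFun (which settles the delivered message ids in
+%% process_ra_events/2), while all other actions are accumulated and the
+%% result is returned as {Msgs, Actions, State}. The helpers below reuse
+%% process_ra_events/5 with discard and return in place of settle.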
+
+discard_next_delivery(State0, Wait) ->
+ receive
+ {ra_event, _, {machine, {delivery, _, _}}} = Evt ->
+ element(3, process_ra_events([Evt], State0, [], [],
+ fun ({deliver, Tag, _, Msgs}, S) ->
+ MsgIds = [element(3, M) || M <- Msgs],
+ {S0, _} = rabbit_fifo_client:discard(Tag, MsgIds, S),
+ S0
+ end))
+ after Wait ->
+ State0
+ end.
+
+return_next_delivery(State0, Wait) ->
+ receive
+ {ra_event, _, {machine, {delivery, _, _}}} = Evt ->
+ element(3, process_ra_events([Evt], State0, [], [],
+ fun ({deliver, Tag, _, Msgs}, S) ->
+ MsgIds = [element(3, M) || M <- Msgs],
+ {S0, _} = rabbit_fifo_client:return(Tag, MsgIds, S),
+ S0
+ end))
+ after Wait ->
+ State0
+ end.
+
+validate_process_down(Name, 0) ->
+ exit({process_not_down, Name});
+validate_process_down(Name, Num) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ timer:sleep(100),
+ validate_process_down(Name, Num-1)
+ end.
+
+start_cluster(ClusterName, ServerIds, RaFifoConfig) ->
+ {ok, Started, _} = ra:start_cluster(ClusterName#resource.name,
+ {module, rabbit_fifo, RaFifoConfig},
+ ServerIds),
+ ?assertEqual(length(Started), length(ServerIds)),
+ ok.
+
+start_cluster(ClusterName, ServerIds) ->
+ start_cluster(ClusterName, ServerIds, #{name => some_name,
+ queue_resource => ClusterName}).
+
+flush() ->
+ receive
+ Msg ->
+ ct:pal("flushed: ~w~n", [Msg]),
+ flush()
+ after 10 ->
+ ok
+ end.
diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl
new file mode 100644
index 0000000000..859db2178f
--- /dev/null
+++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl
@@ -0,0 +1,1211 @@
+-module(rabbit_fifo_prop_SUITE).
+
+-compile(export_all).
+
+-export([
+ ]).
+
+-include_lib("proper/include/proper.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("src/rabbit_fifo.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+all_tests() ->
+ [
+ test_run_log,
+ snapshots,
+ scenario1,
+ scenario2,
+ scenario3,
+ scenario4,
+ scenario5,
+ scenario6,
+ scenario7,
+ scenario8,
+ scenario9,
+ scenario10,
+ scenario11,
+ scenario12,
+ scenario13,
+ scenario14,
+ scenario15,
+ scenario16,
+ scenario17,
+ scenario18,
+ scenario19,
+ scenario20,
+ scenario21,
+ scenario22,
+ single_active,
+ single_active_01,
+ single_active_02,
+ single_active_03,
+ single_active_ordering,
+ single_active_ordering_01,
+ single_active_ordering_03,
+ in_memory_limit,
+ max_length
+ % single_active_ordering_02
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+% -type log_op() ::
+% {enqueue, pid(), maybe(msg_seqno()), Msg :: raw_msg()}.
+
+scenario1(_Config) ->
+ C1 = {<<>>, c:pid(0,6723,1)},
+ C2 = {<<0>>,c:pid(0,6723,1)},
+ E = c:pid(0,6720,1),
+
+ Commands = [
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E,1,msg1),
+ make_enqueue(E,2,msg2),
+ make_checkout(C1, cancel), %% both on returns queue
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ make_return(C2, [0]), %% E1 in returns, E2 with C2
+ make_return(C2, [1]), %% E2 in returns E1 with C2
+ make_settle(C2, [2]) %% E2 with C2
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands),
+ ok.
+
+scenario2(_Config) ->
+ C1 = {<<>>, c:pid(0,346,1)},
+ C2 = {<<>>,c:pid(0,379,1)},
+ E = c:pid(0,327,1),
+ Commands = [make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,msg1),
+ make_checkout(C1, cancel),
+ make_enqueue(E,2,msg2),
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ make_settle(C1, [0]),
+ make_settle(C2, [0])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands),
+ ok.
+
+scenario3(_Config) ->
+ C1 = {<<>>, c:pid(0,179,1)},
+ E = c:pid(0,176,1),
+ Commands = [make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E,1,msg1),
+ make_return(C1, [0]),
+ make_enqueue(E,2,msg2),
+ make_enqueue(E,3,msg3),
+ make_settle(C1, [1]),
+ make_settle(C1, [2])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands),
+ ok.
+
+scenario4(_Config) ->
+ C1 = {<<>>, c:pid(0,179,1)},
+ E = c:pid(0,176,1),
+ Commands = [make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,msg),
+ make_settle(C1, [0])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands),
+ ok.
+
+scenario5(_Config) ->
+ C1 = {<<>>, c:pid(0,505,0)},
+ E = c:pid(0,465,9),
+ Commands = [make_enqueue(E,1,<<0>>),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,2,<<>>),
+ make_settle(C1,[0])],
+ run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands),
+ ok.
+
+scenario6(_Config) ->
+ E = c:pid(0,465,9),
+ Commands = [make_enqueue(E,1,<<>>), %% 1 msg on queue - snap: prefix 1
+ make_enqueue(E,2,<<>>) %% 1. msg on queue - snap: prefix 1
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 1}, Commands),
+ ok.
+
+scenario7(_Config) ->
+ C1 = {<<>>, c:pid(0,208,0)},
+ E = c:pid(0,188,0),
+ Commands = [
+ make_enqueue(E,1,<<>>),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,2,<<>>),
+ make_enqueue(E,3,<<>>),
+ make_settle(C1,[0])],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 1}, Commands),
+ ok.
+
+scenario8(_Config) ->
+ C1 = {<<>>, c:pid(0,208,0)},
+ E = c:pid(0,188,0),
+ Commands = [
+ make_enqueue(E,1,<<>>),
+ make_enqueue(E,2,<<>>),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ % make_checkout(C1, cancel),
+ {down, E, noconnection},
+ make_settle(C1, [0])],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 1}, Commands),
+ ok.
+
+scenario9(_Config) ->
+ E = c:pid(0,188,0),
+ Commands = [
+ make_enqueue(E,1,<<>>),
+ make_enqueue(E,2,<<>>),
+ make_enqueue(E,3,<<>>)],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 1}, Commands),
+ ok.
+
+scenario10(_Config) ->
+ C1 = {<<>>, c:pid(0,208,0)},
+ E = c:pid(0,188,0),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,<<>>),
+ make_settle(C1, [0])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 1}, Commands),
+ ok.
+
+scenario11(_Config) ->
+ C1 = {<<>>, c:pid(0,215,0)},
+ E = c:pid(0,217,0),
+ Commands = [
+ make_enqueue(E,1,<<>>),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_checkout(C1, cancel),
+ make_enqueue(E,2,<<>>),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_settle(C1, [0]),
+ make_checkout(C1, cancel)
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 2}, Commands),
+ ok.
+
+scenario12(_Config) ->
+ E = c:pid(0,217,0),
+ Commands = [make_enqueue(E,1,<<0>>),
+ make_enqueue(E,2,<<0>>),
+ make_enqueue(E,3,<<0>>)],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_bytes => 2}, Commands),
+ ok.
+
+scenario13(_Config) ->
+ E = c:pid(0,217,0),
+ Commands = [make_enqueue(E,1,<<0>>),
+ make_enqueue(E,2,<<>>),
+ make_enqueue(E,3,<<>>),
+ make_enqueue(E,4,<<>>)
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 2}, Commands),
+ ok.
+
+scenario14(_Config) ->
+ E = c:pid(0,217,0),
+ Commands = [make_enqueue(E,1,<<0,0>>)],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_bytes => 1}, Commands),
+ ok.
+
+scenario15(_Config) ->
+ C1 = {<<>>, c:pid(0,179,1)},
+ E = c:pid(0,176,1),
+ Commands = [make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E, 1, msg1),
+ make_enqueue(E, 2, msg2),
+ make_return(C1, [0]),
+ make_return(C1, [2]),
+ make_settle(C1, [1])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ delivery_limit => 1}, Commands),
+ ok.
+
+scenario16(_Config) ->
+ C1Pid = c:pid(0,883,1),
+ C1 = {<<>>, C1Pid},
+ C2 = {<<>>, c:pid(0,882,1)},
+ E = c:pid(0,176,1),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E, 1, msg1),
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ {down, C1Pid, noproc}, %% msg1 allocated to C2
+ make_return(C2, [0]), %% msg1 returned
+ make_enqueue(E, 2, <<>>),
+ make_settle(C2, [0])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ delivery_limit => 1}, Commands),
+ ok.
+
+scenario17(_Config) ->
+ C1Pid = test_util:fake_pid(rabbit@fake_node1),
+ C1 = {<<0>>, C1Pid},
+ % C2Pid = test_util:fake_pid(fake_node1),
+ C2 = {<<>>, C1Pid},
+ E = test_util:fake_pid(rabbit@fake_node2),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,<<"one">>),
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ {down, C1Pid, noconnection},
+ make_checkout(C2, cancel),
+ make_enqueue(E,2,<<"two">>),
+ {nodeup,rabbit@fake_node1},
+                %% this has no effect as the message was returned
+ make_settle(C1, [0]),
+ %% this should settle "one"
+ make_settle(C1, [1])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ single_active_consumer_on => true
+ }, Commands),
+ ok.
+
+scenario18(_Config) ->
+ E = c:pid(0,176,1),
+ Commands = [make_enqueue(E,1,<<"1">>),
+ make_enqueue(E,2,<<"2">>),
+ make_enqueue(E,3,<<"3">>),
+ make_enqueue(E,4,<<"4">>),
+ make_enqueue(E,5,<<"5">>)
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ %% max_length => 3,
+ max_in_memory_length => 1}, Commands),
+ ok.
+
+scenario19(_Config) ->
+ C1Pid = c:pid(0,883,1),
+ C1 = {<<>>, C1Pid},
+ E = c:pid(0,176,1),
+ Commands = [make_enqueue(E,1,<<"1">>),
+ make_enqueue(E,2,<<"2">>),
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E,3,<<"3">>),
+ make_settle(C1, [0, 1])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_in_memory_bytes => 370,
+ max_in_memory_length => 1}, Commands),
+ ok.
+
+scenario20(_Config) ->
+ C1Pid = c:pid(0,883,1),
+ C1 = {<<>>, C1Pid},
+ E = c:pid(0,176,1),
+ Commands = [make_enqueue(E,1,<<>>),
+ make_enqueue(E,2,<<>>),
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ {down, C1Pid, noconnection},
+ make_enqueue(E,3,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>),
+ make_enqueue(E,4,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>),
+ make_enqueue(E,5,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>),
+ make_enqueue(E,6,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>),
+ make_enqueue(E,7,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0>>)
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_bytes => 97,
+ max_in_memory_length => 1}, Commands),
+ ok.
+
+scenario21(_Config) ->
+ C1Pid = c:pid(0,883,1),
+ C1 = {<<>>, C1Pid},
+ E = c:pid(0,176,1),
+ Commands = [
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E,1,<<"1">>),
+ make_enqueue(E,2,<<"2">>),
+ make_enqueue(E,3,<<"3">>),
+ rabbit_fifo:make_discard(C1, [0]),
+ rabbit_fifo:make_settle(C1, [1])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ release_cursor_interval => 1,
+ dead_letter_handler => {?MODULE, banana, []}},
+ Commands),
+ ok.
+
+scenario22(_Config) ->
+ % C1Pid = c:pid(0,883,1),
+ % C1 = {<<>>, C1Pid},
+ E = c:pid(0,176,1),
+ Commands = [
+ make_enqueue(E,1,<<"1">>),
+ make_enqueue(E,2,<<"2">>),
+ make_enqueue(E,3,<<"3">>),
+ make_enqueue(E,4,<<"4">>),
+ make_enqueue(E,5,<<"5">>)
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ release_cursor_interval => 1,
+ max_length => 3,
+ dead_letter_handler => {?MODULE, banana, []}},
+ Commands),
+ ok.
+
+single_active_01(_Config) ->
+ C1Pid = test_util:fake_pid(rabbit@fake_node1),
+ C1 = {<<0>>, C1Pid},
+ C2Pid = test_util:fake_pid(rabbit@fake_node2),
+ C2 = {<<>>, C2Pid},
+ E = test_util:fake_pid(rabbit@fake_node2),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,<<"one">>),
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ make_checkout(C1, cancel),
+ {nodeup,rabbit@fake_node1}
+ ],
+ ?assert(
+ single_active_prop(#{name => ?FUNCTION_NAME,
+ single_active_consumer_on => true
+ }, Commands, false)),
+ ok.
+
+single_active_02(_Config) ->
+ C1Pid = test_util:fake_pid(node()),
+ C1 = {<<0>>, C1Pid},
+ C2Pid = test_util:fake_pid(node()),
+ C2 = {<<>>, C2Pid},
+ E = test_util:fake_pid(node()),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,<<"one">>),
+ {down,E,noconnection},
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ make_checkout(C2, cancel),
+ {down,E,noconnection}
+ ],
+ Conf = config(?FUNCTION_NAME, undefined, undefined, true, 1, undefined, undefined),
+ ?assert(single_active_prop(Conf, Commands, false)),
+ ok.
+
+single_active_03(_Config) ->
+ C1Pid = test_util:fake_pid(node()),
+ C1 = {<<0>>, C1Pid},
+ % C2Pid = test_util:fake_pid(rabbit@fake_node2),
+ % C2 = {<<>>, C2Pid},
+ Pid = test_util:fake_pid(node()),
+ E = test_util:fake_pid(rabbit@fake_node2),
+ Commands = [
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E, 1, 0),
+ make_enqueue(E, 2, 1),
+ {down, Pid, noconnection},
+ {nodeup, node()}
+ ],
+ Conf = config(?FUNCTION_NAME, 0, 0, true, 0, undefined, undefined),
+ ?assert(single_active_prop(Conf, Commands, true)),
+ ok.
+
+test_run_log(_Config) ->
+ Fun = {-1, fun ({Prev, _}) -> {Prev + 1, Prev + 1} end},
+ run_proper(
+ fun () ->
+ ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit, InMemoryLength,
+ InMemoryBytes},
+ frequency([{10, {0, 0, false, 0, 0, 0}},
+ {5, {oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined]),
+ boolean(),
+ oneof([range(1, 3), undefined]),
+ oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined])
+ }}]),
+ ?FORALL(O, ?LET(Ops, log_gen(100), expand(Ops, Fun)),
+ collect({log_size, length(O)},
+ dump_generated(
+ config(?FUNCTION_NAME,
+ Length,
+ Bytes,
+ SingleActiveConsumer,
+ DeliveryLimit,
+ InMemoryLength,
+ InMemoryBytes), O))))
+ end, [], 10).
+
+snapshots(_Config) ->
+ run_proper(
+ fun () ->
+ ?FORALL({Length, Bytes, SingleActiveConsumer,
+ DeliveryLimit, InMemoryLength, InMemoryBytes,
+ Overflow},
+ frequency([{10, {0, 0, false, 0, 0, 0, drop_head}},
+ {5, {oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined]),
+ boolean(),
+ oneof([range(1, 3), undefined]),
+ oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined]),
+ oneof([drop_head, reject_publish])
+ }}]),
+ begin
+ Config = config(?FUNCTION_NAME,
+ Length,
+ Bytes,
+ SingleActiveConsumer,
+ DeliveryLimit,
+ InMemoryLength,
+ InMemoryBytes,
+ Overflow),
+ ?FORALL(O, ?LET(Ops, log_gen(256), expand(Ops, Config)),
+ collect({log_size, length(O)},
+ snapshots_prop(Config, O)))
+ end)
+ end, [], 2500).
+
+single_active(_Config) ->
+ Size = 2000,
+ run_proper(
+ fun () ->
+ ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, InMemoryBytes},
+ frequency([{10, {0, 0, 0, 0, 0}},
+ {5, {oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined]),
+ oneof([range(1, 3), undefined]),
+ oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined])
+ }}]),
+ begin
+ Config = config(?FUNCTION_NAME,
+ Length,
+ Bytes,
+ true,
+ DeliveryLimit,
+ InMemoryLength,
+ InMemoryBytes),
+ ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)),
+ collect({log_size, length(O)},
+ single_active_prop(Config, O, false)))
+ end)
+ end, [], Size).
+
+single_active_ordering(_Config) ->
+ Size = 2000,
+ Fun = {-1, fun ({Prev, _}) -> {Prev + 1, Prev + 1} end},
+ run_proper(
+ fun () ->
+ ?FORALL(O, ?LET(Ops, log_gen_ordered(Size), expand(Ops, Fun)),
+ collect({log_size, length(O)},
+ single_active_prop(config(?FUNCTION_NAME,
+ undefined,
+ undefined,
+ true,
+ undefined,
+ undefined,
+ undefined), O,
+ true)))
+ end, [], Size).
+
+single_active_ordering_01(_Config) ->
+% [{enqueue,<0.145.0>,1,0},
+% {enqueue,<0.145.0>,1,1},
+% {checkout,{<<>>,<0.148.0>},{auto,1,simple_prefetch},#{ack => true,args => [],prefetch => 1,username => <<117,115,101,114>>}}
+% {enqueue,<0.140.0>,1,2},
+% {settle,{<<>>,<0.148.0>},[0]}]
+ C1Pid = test_util:fake_pid(node()),
+ C1 = {<<0>>, C1Pid},
+ E = test_util:fake_pid(rabbit@fake_node2),
+ E2 = test_util:fake_pid(rabbit@fake_node2),
+ Commands = [
+ make_enqueue(E, 1, 0),
+ make_enqueue(E, 2, 1),
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E2, 1, 2),
+ make_settle(C1, [0])
+ ],
+ Conf = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0),
+ ?assert(single_active_prop(Conf, Commands, true)),
+ ok.
+
+single_active_ordering_02(_Config) ->
+ %% this results in the pending enqueue being enqueued and violating
+ %% ordering
+% [{checkout, % {<<>>,<0.177.0>}, % {auto,1,simple_prefetch},
+% {enqueue,<0.172.0>,2,1},
+% {down,<0.172.0>,noproc},
+% {settle,{<<>>,<0.177.0>},[0]}]
+ C1Pid = test_util:fake_pid(node()),
+ C1 = {<<0>>, C1Pid},
+ E = test_util:fake_pid(node()),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E, 2, 1),
+ %% CANNOT HAPPEN
+ {down,E,noproc},
+ make_settle(C1, [0])
+ ],
+ Conf = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0),
+ ?assert(single_active_prop(Conf, Commands, true)),
+ ok.
+
+single_active_ordering_03(_Config) ->
+ C1Pid = test_util:fake_pid(node()),
+ C1 = {<<1>>, C1Pid},
+ C2Pid = test_util:fake_pid(rabbit@fake_node2),
+ C2 = {<<2>>, C2Pid},
+ E = test_util:fake_pid(rabbit@fake_node2),
+ Commands = [
+ make_enqueue(E, 1, 0),
+ make_enqueue(E, 2, 1),
+ make_enqueue(E, 3, 2),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ make_settle(C1, [0]),
+ make_checkout(C1, cancel),
+ {down, C1Pid, noconnection}
+ ],
+ Conf0 = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0),
+ Conf = Conf0#{release_cursor_interval => 100},
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ try run_log(test_init(Conf), Entries) of
+ {State, Effects} ->
+ ct:pal("Effects: ~p~n", [Effects]),
+ ct:pal("State: ~p~n", [State]),
+ %% assert C1 has no messages
+ ?assertNotMatch(#{C1 := _}, State#rabbit_fifo.consumers),
+ true;
+ _ ->
+ true
+ catch
+ Err ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ ct:pal("Err: ~p~n", [Err]),
+ false
+ end.
+
+in_memory_limit(_Config) ->
+ Size = 2000,
+ run_proper(
+ fun () ->
+ ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit,
+ InMemoryLength, InMemoryBytes},
+ frequency([{10, {0, 0, false, 0, 0, 0}},
+ {5, {oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined]),
+ boolean(),
+ oneof([range(1, 3), undefined]),
+ range(1, 10),
+ range(1, 1000)
+ }}]),
+ begin
+ Config = config(?FUNCTION_NAME,
+ Length,
+ Bytes,
+ SingleActiveConsumer,
+ DeliveryLimit,
+ InMemoryLength,
+ InMemoryBytes),
+ ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)),
+ collect({log_size, length(O)},
+ in_memory_limit_prop(Config, O)))
+ end)
+ end, [], Size).
+
+max_length(_Config) ->
+ %% tests that max length is never transgressed
+ Size = 1000,
+ run_proper(
+ fun () ->
+ ?FORALL({Length, SingleActiveConsumer, DeliveryLimit,
+ InMemoryLength},
+ {oneof([range(1, 100), undefined]),
+ boolean(),
+ range(1, 3),
+ range(1, 10)
+ },
+ begin
+ Config = config(?FUNCTION_NAME,
+ Length,
+ undefined,
+ SingleActiveConsumer,
+ DeliveryLimit,
+ InMemoryLength,
+ undefined),
+ ?FORALL(O, ?LET(Ops, log_gen_config(Size),
+ expand(Ops, Config)),
+ collect({log_size, length(O)},
+ max_length_prop(Config, O)))
+ end)
+ end, [], Size).
+
+config(Name, Length, Bytes, SingleActive, DeliveryLimit,
+ InMemoryLength, InMemoryBytes) ->
+config(Name, Length, Bytes, SingleActive, DeliveryLimit,
+ InMemoryLength, InMemoryBytes, drop_head).
+
+config(Name, Length, Bytes, SingleActive, DeliveryLimit,
+ InMemoryLength, InMemoryBytes, Overflow) ->
+ #{name => Name,
+ max_length => map_max(Length),
+ max_bytes => map_max(Bytes),
+ dead_letter_handler => {?MODULE, banana, []},
+ single_active_consumer_on => SingleActive,
+ delivery_limit => map_max(DeliveryLimit),
+ max_in_memory_length => map_max(InMemoryLength),
+ max_in_memory_bytes => map_max(InMemoryBytes),
+ overflow_strategy => Overflow}.
+
+map_max(0) -> undefined;
+map_max(N) -> N.
+
+in_memory_limit_prop(Conf0, Commands) ->
+ Conf = Conf0#{release_cursor_interval => 100},
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ try run_log(test_init(Conf), Entries) of
+ {_State, Effects} ->
+ %% validate message ordering
+ lists:foldl(fun ({log, Idxs, _}, ReleaseCursorIdx) ->
+ validate_idx_order(Idxs, ReleaseCursorIdx),
+ ReleaseCursorIdx;
+ ({release_cursor, Idx, _}, _) ->
+ Idx;
+ (_, Acc) ->
+ Acc
+ end, 0, Effects),
+ true;
+ _ ->
+ true
+ catch
+ Err ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ ct:pal("Err: ~p~n", [Err]),
+ false
+ end.
+
+max_length_prop(Conf0, Commands) ->
+ Conf = Conf0#{release_cursor_interval => 100},
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ Invariant = fun (#rabbit_fifo{cfg = #cfg{max_length = MaxLen}} = S) ->
+ #{num_ready_messages := MsgReady} = rabbit_fifo:overview(S),
+ % ct:pal("msg Ready ~w ~w", [MsgReady, MaxLen]),
+ MsgReady =< MaxLen
+ end,
+ try run_log(test_init(Conf), Entries, Invariant) of
+ {_State, _Effects} ->
+ true;
+ _ ->
+ true
+ catch
+ Err ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ ct:pal("Err: ~p~n", [Err]),
+ false
+ end.
+
+validate_idx_order([], _ReleaseCursorIdx) ->
+ true;
+validate_idx_order(Idxs, ReleaseCursorIdx) ->
+ Min = lists:min(Idxs),
+ case Min < ReleaseCursorIdx of
+ true ->
+ throw({invalid_log_index, Min, ReleaseCursorIdx});
+ false ->
+ ok
+ end.
+
+single_active_prop(Conf0, Commands, ValidateOrder) ->
+ Conf = Conf0#{release_cursor_interval => 100},
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ %% invariant: there can only be one active consumer at any one time
+ %% there can however be multiple cancelled consumers
+ Invariant = fun (#rabbit_fifo{consumers = Consumers}) ->
+ Up = maps:filter(fun (_, #consumer{status = S}) ->
+ S == up
+ end, Consumers),
+ map_size(Up) =< 1
+ end,
+ try run_log(test_init(Conf), Entries, Invariant) of
+ {_State, Effects} when ValidateOrder ->
+ % ct:pal("Effects: ~p~n", [Effects]),
+ % ct:pal("State: ~p~n", [State]),
+ %% validate message ordering
+ lists:foldl(fun ({send_msg, Pid, {delivery, Tag, Msgs}, ra_event},
+ Acc) ->
+ validate_msg_order({Tag, Pid}, Msgs, Acc);
+ (_, Acc) ->
+ Acc
+ end, -1, Effects),
+ true;
+ _ ->
+ true
+ catch
+ Err ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ ct:pal("Err: ~p~n", [Err]),
+ false
+ end.
+
+%% single active consumer ordering invariant:
+%% only redelivered messages can go backwards
+validate_msg_order(_, [], S) ->
+ S;
+validate_msg_order(Cid, [{_, {H, Num}} | Rem], PrevMax) ->
+ Redelivered = is_map(H) andalso maps:is_key(delivery_count, H),
+ case undefined of
+ _ when Num == PrevMax + 1 ->
+ %% forwards case
+ validate_msg_order(Cid, Rem, Num);
+ _ when Redelivered andalso Num =< PrevMax ->
+ %% the seq is lower but this is a redelivery
+            %% when the consumer changed and the next message has been redelivered
+ %% we may go backwards but keep the highest seen
+ validate_msg_order(Cid, Rem, PrevMax);
+ _ ->
+ ct:pal("out of order ~w Prev ~w Curr ~w Redel ~w",
+ [Cid, PrevMax, Num, Redelivered]),
+ throw({outoforder, Cid, PrevMax, Num})
+ end.
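+
+%% Illustrative example of the invariant (the numbers are made up): with
+%% PrevMax = 3, a message numbered 4 is the normal forwards case; a message
+%% numbered 2 that carries a delivery_count header is an accepted redelivery
+%% (PrevMax stays 3); a message numbered 2 without that header fails with
+%% {outoforder, Cid, 3, 2}.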
+
+
+
+
+dump_generated(Conf, Commands) ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ true.
+
+snapshots_prop(Conf, Commands) ->
+ try run_snapshot_test(Conf, Commands) of
+ _ -> true
+ catch
+ Err ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ ct:pal("Err: ~p~n", [Err]),
+ false
+ end.
+
+log_gen(Size) ->
+ log_gen(Size, binary()).
+
+log_gen(Size, _Body) ->
+ Nodes = [node(),
+ fakenode@fake,
+ fakenode@fake2
+ ],
+ ?LET(EPids, vector(2, pid_gen(Nodes)),
+ ?LET(CPids, vector(2, pid_gen(Nodes)),
+ resize(Size,
+ list(
+ frequency(
+ [{20, enqueue_gen(oneof(EPids))},
+ {40, {input_event,
+ frequency([{10, settle},
+ {2, return},
+ {2, discard},
+ {2, requeue}])}},
+ {2, checkout_gen(oneof(CPids))},
+ {1, checkout_cancel_gen(oneof(CPids))},
+ {1, down_gen(oneof(EPids ++ CPids))},
+ {1, nodeup_gen(Nodes)},
+ {1, purge}
+ ]))))).
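+
+%% Note on the weights above: enqueues (20) and input events (40) dominate the
+%% generated logs, while checkouts, cancellations, downs, nodeups and purges
+%% are mixed in at low frequency to exercise the rarer state transitions.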
+
+log_gen_config(Size) ->
+ Nodes = [node(),
+ fakenode@fake,
+ fakenode@fake2
+ ],
+ ?LET(EPids, vector(2, pid_gen(Nodes)),
+ ?LET(CPids, vector(2, pid_gen(Nodes)),
+ resize(Size,
+ list(
+ frequency(
+ [{20, enqueue_gen(oneof(EPids))},
+ {40, {input_event,
+ frequency([{5, settle},
+ {5, return},
+ {2, discard},
+ {2, requeue}])}},
+ {2, checkout_gen(oneof(CPids))},
+ {1, checkout_cancel_gen(oneof(CPids))},
+ {1, down_gen(oneof(EPids ++ CPids))},
+ {1, nodeup_gen(Nodes)},
+ {1, purge},
+ {1, ?LET({MaxInMem,
+ MaxLen},
+ {choose(1, 10),
+ choose(1, 10)},
+ {update_config,
+ #{max_in_memory_length => MaxInMem,
+ max_length => MaxLen}})
+ }]))))).
+
+log_gen_ordered(Size) ->
+ Nodes = [node(),
+ fakenode@fake,
+ fakenode@fake2
+ ],
+ ?LET(EPids, vector(1, pid_gen(Nodes)),
+ ?LET(CPids, vector(8, pid_gen(Nodes)),
+ resize(Size,
+ list(
+ frequency(
+ [{20, enqueue_gen(oneof(EPids), 10, 0)},
+ {40, {input_event,
+ frequency([{15, settle},
+ {1, return},
+ {1, discard},
+ {1, requeue}])}},
+ {7, checkout_gen(oneof(CPids))},
+ {2, checkout_cancel_gen(oneof(CPids))},
+ {2, down_gen(oneof(EPids ++ CPids))},
+ {1, nodeup_gen(Nodes)}
+ ]))))).
+
+monotonic_gen() ->
+ ?LET(_, integer(), erlang:unique_integer([positive, monotonic])).
+
+pid_gen(Nodes) ->
+ ?LET(Node, oneof(Nodes),
+ test_util:fake_pid(atom_to_binary(Node, utf8))).
+
+down_gen(Pid) ->
+ ?LET(E, {down, Pid, oneof([noconnection, noproc])}, E).
+
+nodeup_gen(Nodes) ->
+ {nodeup, oneof(Nodes)}.
+
+enqueue_gen(Pid) ->
+ enqueue_gen(Pid, 10, 1).
+
+enqueue_gen(Pid, Enq, Del) ->
+ ?LET(E, {enqueue, Pid,
+ frequency([{Enq, enqueue},
+ {Del, delay}]),
+ binary()}, E).
+
+checkout_cancel_gen(Pid) ->
+ {checkout, Pid, cancel}.
+
+checkout_gen(Pid) ->
+ %% pid, tag, prefetch
+ ?LET(C, {checkout, {binary(), Pid}, choose(1, 100)}, C).
+
+
+-record(t, {state = rabbit_fifo:init(#{name => proper,
+ queue_resource => blah,
+ release_cursor_interval => 1})
+ :: rabbit_fifo:state(),
+ index = 1 :: non_neg_integer(), %% raft index
+ enqueuers = #{} :: #{pid() => term()},
+ consumers = #{} :: #{{binary(), pid()} => term()},
+ effects = queue:new() :: queue:queue(),
+ %% to transform the body
+ enq_body_fun = {0, fun ra_lib:id/1},
+ config :: map(),
+ log = [] :: list(),
+ down = #{} :: #{pid() => noproc | noconnection}
+ }).
+
+expand(Ops, Config) ->
+ expand(Ops, Config, {undefined, fun ra_lib:id/1}).
+
+expand(Ops, Config, EnqFun) ->
+ %% execute each command against a rabbit_fifo state and capture all relevant
+ %% effects
+ T = #t{enq_body_fun = EnqFun,
+ config = Config},
+ #t{effects = Effs} = T1 = lists:foldl(fun handle_op/2, T, Ops),
+    %% process the remaining effects
+ #t{log = Log} = lists:foldl(fun do_apply/2,
+ T1#t{effects = queue:new()},
+ queue:to_list(Effs)),
+
+ lists:reverse(Log).
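+
+%% Minimal sketch of the expansion (the command values are illustrative): a
+%% generated list such as [{enqueue, E, enqueue, <<"m">>}, {checkout, C, 1},
+%% {input_event, settle}] is folded through handle_op/2, which either applies
+%% each command to the rabbit_fifo state straight away or parks it on the
+%% effects queue; the resulting log of applied commands is what the
+%% properties replay.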
+
+
+handle_op({enqueue, Pid, When, Data},
+ #t{enqueuers = Enqs0,
+ enq_body_fun = {EnqSt0, Fun},
+ down = Down,
+ effects = Effs} = T) ->
+ case Down of
+ #{Pid := noproc} ->
+            %% a process that is down with noproc cannot enqueue again,
+            %% so drop the operation
+ T;
+ _ ->
+ Enqs = maps:update_with(Pid, fun (Seq) -> Seq + 1 end, 1, Enqs0),
+ MsgSeq = maps:get(Pid, Enqs),
+ {EnqSt, Msg} = Fun({EnqSt0, Data}),
+ Cmd = rabbit_fifo:make_enqueue(Pid, MsgSeq, Msg),
+ case When of
+ enqueue ->
+ do_apply(Cmd, T#t{enqueuers = Enqs,
+ enq_body_fun = {EnqSt, Fun}});
+ delay ->
+ %% just put the command on the effects queue
+ T#t{effects = queue:in(Cmd, Effs),
+ enqueuers = Enqs,
+ enq_body_fun = {EnqSt, Fun}}
+ end
+ end;
+handle_op({checkout, Pid, cancel}, #t{consumers = Cons0} = T) ->
+ case maps:keys(
+ maps:filter(fun ({_, P}, _) when P == Pid -> true;
+ (_, _) -> false
+ end, Cons0)) of
+ [CId | _] ->
+ Cons = maps:remove(CId, Cons0),
+ Cmd = rabbit_fifo:make_checkout(CId, cancel, #{}),
+ do_apply(Cmd, T#t{consumers = Cons});
+ _ ->
+ T
+ end;
+handle_op({checkout, CId, Prefetch}, #t{consumers = Cons0} = T) ->
+ case Cons0 of
+ #{CId := _} ->
+ %% ignore if it already exists
+ T;
+ _ ->
+ Cons = maps:put(CId, ok, Cons0),
+ Cmd = rabbit_fifo:make_checkout(CId,
+ {auto, Prefetch, simple_prefetch},
+ #{ack => true,
+ prefetch => Prefetch,
+ username => <<"user">>,
+ args => []}),
+
+ do_apply(Cmd, T#t{consumers = Cons})
+ end;
+handle_op({down, Pid, Reason} = Cmd, #t{down = Down} = T) ->
+ case Down of
+ #{Pid := noproc} ->
+            %% it is permanently down, cannot be upgraded
+ T;
+ _ ->
+ %% it is either not down or down with noconnection
+ do_apply(Cmd, T#t{down = maps:put(Pid, Reason, Down)})
+ end;
+handle_op({nodeup, _} = Cmd, T) ->
+ do_apply(Cmd, T);
+handle_op({input_event, requeue}, #t{effects = Effs} = T) ->
+ %% this simulates certain settlements arriving out of order
+ case queue:out(Effs) of
+ {{value, Cmd}, Q} ->
+ T#t{effects = queue:in(Cmd, Q)};
+ _ ->
+ T
+ end;
+handle_op({input_event, Settlement}, #t{effects = Effs,
+ down = Down} = T) ->
+ case queue:out(Effs) of
+ {{value, {settle, MsgIds, CId}}, Q} ->
+ Cmd = case Settlement of
+ settle -> rabbit_fifo:make_settle(CId, MsgIds);
+ return -> rabbit_fifo:make_return(CId, MsgIds);
+ discard -> rabbit_fifo:make_discard(CId, MsgIds)
+ end,
+ do_apply(Cmd, T#t{effects = Q});
+ {{value, {enqueue, Pid, _, _} = Cmd}, Q} ->
+ case maps:is_key(Pid, Down) of
+ true ->
+ %% enqueues cannot arrive after down for the same process
+ %% drop message
+ T#t{effects = Q};
+ false ->
+ do_apply(Cmd, T#t{effects = Q})
+ end;
+ _ ->
+ T
+ end;
+handle_op(purge, T) ->
+ do_apply(rabbit_fifo:make_purge(), T);
+handle_op({update_config, Changes}, #t{config = Conf} = T) ->
+ Config = maps:merge(Conf, Changes),
+ do_apply(rabbit_fifo:make_update_config(Config), T).
+
+
+do_apply(Cmd, #t{effects = Effs,
+ index = Index, state = S0,
+ down = Down,
+ log = Log} = T) ->
+ case Cmd of
+ {enqueue, Pid, _, _} when is_map_key(Pid, Down) ->
+ %% down
+ T;
+ _ ->
+ {St, Effects} = case rabbit_fifo:apply(meta(Index), Cmd, S0) of
+ {S, _, E} when is_list(E) ->
+ {S, E};
+ {S, _, E} ->
+ {S, [E]};
+ {S, _} ->
+ {S, []}
+ end,
+
+ T#t{state = St,
+ index = Index + 1,
+ effects = enq_effs(Effects, Effs),
+ log = [Cmd | Log]}
+ end.
+
+enq_effs([], Q) -> Q;
+enq_effs([{send_msg, P, {delivery, CTag, Msgs}, ra_event} | Rem], Q) ->
+ MsgIds = [I || {I, _} <- Msgs],
+ %% always make settle commands by default
+ %% they can be changed depending on the input event later
+ Cmd = rabbit_fifo:make_settle({CTag, P}, MsgIds),
+ enq_effs(Rem, queue:in(Cmd, Q));
+enq_effs([_ | Rem], Q) ->
+ enq_effs(Rem, Q).
+
+
+%% Utility
+run_proper(Fun, Args, NumTests) ->
+ ?assertEqual(
+ true,
+ proper:counterexample(
+ erlang:apply(Fun, Args),
+ [{numtests, NumTests},
+ {on_output, fun(".", _) -> ok; % don't print the '.'s on new lines
+ (F, A) -> ct:pal(?LOW_IMPORTANCE, F, A)
+ end}])).
+
+run_snapshot_test(Conf, Commands) ->
+    %% create every incremental prefix of the commands list
+    %% and run the snapshot test against each of them
+ ct:pal("running snapshot test with ~b commands using config ~p",
+ [length(Commands), Conf]),
+ [begin
+ % ?debugFmt("~w running command to ~w~n", [?FUNCTION_NAME, lists:last(C)]),
+ run_snapshot_test0(Conf, C)
+ end || C <- prefixes(Commands, 1, [])].
+
+run_snapshot_test0(Conf, Commands) ->
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ {State0, Effects} = run_log(test_init(Conf), Entries),
+ State = rabbit_fifo:normalize(State0),
+
+ [begin
+ % ct:pal("release_cursor: ~b~n", [SnapIdx]),
+ %% drop all entries below and including the snapshot
+ Filtered = lists:dropwhile(fun({X, _}) when X =< SnapIdx -> true;
+ (_) -> false
+ end, Entries),
+ {S0, _} = run_log(SnapState, Filtered),
+ S = rabbit_fifo:normalize(S0),
+ % assert log can be restored from any release cursor index
+ case S of
+ State -> ok;
+ _ ->
+ ct:pal("Snapshot tests failed run log:~n"
+ "~p~n from ~n~p~n Entries~n~p~n"
+ "Config: ~p~n",
+ [Filtered, SnapState, Entries, Conf]),
+ ct:pal("Expected~n~p~nGot:~n~p", [State, S]),
+ ?assertEqual(State, S)
+ end
+ end || {release_cursor, SnapIdx, SnapState} <- Effects],
+ ok.
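+
+%% Sketch of the check above (the indexes are illustrative): if a
+%% {release_cursor, 5, SnapState} effect was emitted, all entries with index
+%% =< 5 are dropped and the remainder is replayed on top of SnapState; the
+%% normalized result must equal the state produced by replaying the full log.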
+
+%% transforms [1,2,3] into [[1,2,3], [1,2], [1]]
+prefixes(Source, N, Acc) when N > length(Source) ->
+ lists:reverse(Acc);
+prefixes(Source, N, Acc) ->
+ {X, _} = lists:split(N, Source),
+ prefixes(Source, N+1, [X | Acc]).
+
+run_log(InitState, Entries) ->
+ run_log(InitState, Entries, fun(_) -> true end).
+
+run_log(InitState, Entries, InvariantFun) ->
+ Invariant = fun(E, S) ->
+ case InvariantFun(S) of
+ true -> ok;
+ false ->
+ throw({invariant, E, S})
+ end
+ end,
+
+ lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) ->
+ case rabbit_fifo:apply(meta(Idx), E, Acc0) of
+ {Acc, _, Efx} when is_list(Efx) ->
+ Invariant(E, Acc),
+ {Acc, Efx0 ++ Efx};
+ {Acc, _, Efx} ->
+ Invariant(E, Acc),
+ {Acc, Efx0 ++ [Efx]};
+ {Acc, _} ->
+ Invariant(E, Acc),
+ {Acc, Efx0}
+ end
+ end, {InitState, []}, Entries).
+
+test_init(Conf) ->
+ Default = #{queue_resource => blah,
+ release_cursor_interval => 0,
+ metrics_handler => {?MODULE, metrics_handler, []}},
+ rabbit_fifo:init(maps:merge(Default, Conf)).
+
+meta(Idx) ->
+ #{index => Idx, term => 1, system_time => 0}.
+
+make_checkout(Cid, Spec) ->
+ rabbit_fifo:make_checkout(Cid, Spec, #{}).
+
+make_enqueue(Pid, Seq, Msg) ->
+ rabbit_fifo:make_enqueue(Pid, Seq, Msg).
+
+make_settle(Cid, MsgIds) ->
+ rabbit_fifo:make_settle(Cid, MsgIds).
+
+make_return(Cid, MsgIds) ->
+ rabbit_fifo:make_return(Cid, MsgIds).
diff --git a/deps/rabbit/test/rabbit_fifo_v0_SUITE.erl b/deps/rabbit/test/rabbit_fifo_v0_SUITE.erl
new file mode 100644
index 0000000000..fcb84377de
--- /dev/null
+++ b/deps/rabbit/test/rabbit_fifo_v0_SUITE.erl
@@ -0,0 +1,1392 @@
+-module(rabbit_fifo_v0_SUITE).
+
+%% rabbit_fifo unit tests suite
+
+-compile(export_all).
+
+-compile({no_auto_import, [apply/3]}).
+-export([
+ ]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("src/rabbit_fifo_v0.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+%% replicate eunit-like test resolution
+all_tests() ->
+ [F || {F, _} <- ?MODULE:module_info(functions),
+ re:run(atom_to_list(F), "_test$") /= nomatch].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+-define(ASSERT_EFF(EfxPat, Effects),
+ ?ASSERT_EFF(EfxPat, true, Effects)).
+
+-define(ASSERT_EFF(EfxPat, Guard, Effects),
+ ?assert(lists:any(fun (EfxPat) when Guard -> true;
+ (_) -> false
+ end, Effects))).
+
+-define(ASSERT_NO_EFF(EfxPat, Effects),
+ ?assert(not lists:any(fun (EfxPat) -> true;
+ (_) -> false
+ end, Effects))).
+
+-define(assertNoEffect(EfxPat, Effects),
+ ?assert(not lists:any(fun (EfxPat) -> true;
+ (_) -> false
+ end, Effects))).
+
+test_init(Name) ->
+ init(#{name => Name,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(Name, utf8)),
+ release_cursor_interval => 0}).
+
+enq_enq_checkout_test(_) ->
+ Cid = {<<"enq_enq_checkout_test">>, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {_State3, _, Effects} =
+ apply(meta(3),
+ rabbit_fifo_v0:make_checkout(Cid, {once, 2, simple_prefetch}, #{}),
+ State2),
+ ?ASSERT_EFF({monitor, _, _}, Effects),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, _}, _}, Effects),
+ ok.
+
+credit_enq_enq_checkout_settled_credit_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {State3, _, Effects} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {auto, 1, credited}, #{}), State2),
+ ?ASSERT_EFF({monitor, _, _}, Effects),
+ Deliveries = lists:filter(fun ({send_msg, _, {delivery, _, _}, _}) -> true;
+ (_) -> false
+ end, Effects),
+ ?assertEqual(1, length(Deliveries)),
+    %% settle the delivery; this should _not_ result in further messages being
+    %% delivered
+ {State4, SettledEffects} = settle(Cid, 4, 1, State3),
+ ?assertEqual(false, lists:any(fun ({send_msg, _, {delivery, _, _}, _}) ->
+ true;
+ (_) -> false
+ end, SettledEffects)),
+    %% granting credit (3) should deliver the second msg if the receiver's
+    %% delivery count is (1)
+ {State5, CreditEffects} = credit(Cid, 5, 1, 1, false, State4),
+ % ?debugFmt("CreditEffects ~p ~n~p", [CreditEffects, State4]),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, _}, _}, CreditEffects),
+ {_State6, FinalEffects} = enq(6, 3, third, State5),
+ ?assertEqual(false, lists:any(fun ({send_msg, _, {delivery, _, _}, _}) ->
+ true;
+ (_) -> false
+ end, FinalEffects)),
+ ok.
+
+credit_with_drained_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = test_init(test),
+ %% checkout with a single credit
+ {State1, _, _} =
+ apply(meta(1), rabbit_fifo_v0:make_checkout(Cid, {auto, 1, credited},#{}),
+ State0),
+ ?assertMatch(#?STATE{consumers = #{Cid := #consumer{credit = 1,
+ delivery_count = 0}}},
+ State1),
+ {State, Result, _} =
+ apply(meta(3), rabbit_fifo_v0:make_credit(Cid, 0, 5, true), State1),
+ ?assertMatch(#?STATE{consumers = #{Cid := #consumer{credit = 0,
+ delivery_count = 5}}},
+ State),
+ ?assertEqual({multi, [{send_credit_reply, 0},
+ {send_drained, {?FUNCTION_NAME, 5}}]},
+ Result),
+ ok.
+
+credit_and_drain_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ %% checkout without any initial credit (like AMQP 1.0 would)
+ {State3, _, CheckEffs} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {auto, 0, credited}, #{}),
+ State2),
+
+ ?ASSERT_NO_EFF({send_msg, _, {delivery, _, _}}, CheckEffs),
+ {State4, {multi, [{send_credit_reply, 0},
+ {send_drained, {?FUNCTION_NAME, 2}}]},
+ Effects} = apply(meta(4), rabbit_fifo_v0:make_credit(Cid, 4, 0, true), State3),
+ ?assertMatch(#?STATE{consumers = #{Cid := #consumer{credit = 0,
+ delivery_count = 4}}},
+ State4),
+
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}},
+ {_, {_, second}}]}, _}, Effects),
+ {_State5, EnqEffs} = enq(5, 2, third, State4),
+ ?ASSERT_NO_EFF({send_msg, _, {delivery, _, _}}, EnqEffs),
+ ok.
+
+
+
+enq_enq_deq_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ % get returns a reply value
+ NumReady = 1,
+ {_State3, {dequeue, {0, {_, first}}, NumReady}, [{monitor, _, _}]} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State2),
+ ok.
+
+enq_enq_deq_deq_settle_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ % get returns a reply value
+ {State3, {dequeue, {0, {_, first}}, 1}, [{monitor, _, _}]} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State2),
+ {_State4, {dequeue, empty}} =
+ apply(meta(4), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State3),
+ ok.
+
+enq_enq_checkout_get_settled_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ % get returns a reply value
+ {_State2, {dequeue, {0, {_, first}}, _}, _Effs} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, settled}, #{}),
+ State1),
+ ok.
+
+checkout_get_empty_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State = test_init(test),
+ {_State2, {dequeue, empty}} =
+ apply(meta(1), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}), State),
+ ok.
+
+untracked_enq_deq_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = test_init(test),
+ {State1, _, _} = apply(meta(1),
+ rabbit_fifo_v0:make_enqueue(undefined, undefined, first),
+ State0),
+ {_State2, {dequeue, {0, {_, first}}, _}, _} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, settled}, #{}), State1),
+ ok.
+
+release_cursor_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {State3, _} = check(Cid, 3, 10, State2),
+ % no release cursor effect at this point
+ {State4, _} = settle(Cid, 4, 1, State3),
+ {_Final, Effects1} = settle(Cid, 5, 0, State4),
+ % empty queue forwards release cursor all the way
+ ?ASSERT_EFF({release_cursor, 5, _}, Effects1),
+ ok.
+
+checkout_enq_settle_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, [{monitor, _, _} | _]} = check(Cid, 1, test_init(test)),
+ {State2, Effects0} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, ?FUNCTION_NAME,
+ [{0, {_, first}}]}, _},
+ Effects0),
+ {State3, [_Inactive]} = enq(3, 2, second, State2),
+ {_, _Effects} = settle(Cid, 4, 0, State3),
+ % the release cursor is the smallest raft index that does not
+ % contribute to the state of the application
+ % ?ASSERT_EFF({release_cursor, 2, _}, Effects),
+ ok.
+
+out_of_order_enqueue_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, [{monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)),
+ {State2, Effects2} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2),
+ % assert monitor was set up
+ ?ASSERT_EFF({monitor, _, _}, Effects2),
+ % enqueue seq num 3 and 4 before 2
+ {State3, Effects3} = enq(3, 3, third, State2),
+ ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects3),
+ {State4, Effects4} = enq(4, 4, fourth, State3),
+    % assert no further deliveries were made
+ ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects4),
+ {_State5, Effects5} = enq(5, 2, second, State4),
+    % assert the pending messages were now delivered in order
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, second}},
+ {_, {_, third}},
+ {_, {_, fourth}}]}, _},
+ Effects5),
+ ok.
+
+out_of_order_first_enqueue_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = check_n(Cid, 5, 5, test_init(test)),
+ {_State2, Effects2} = enq(2, 10, first, State1),
+ ?ASSERT_EFF({monitor, process, _}, Effects2),
+ ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _},
+ Effects2),
+ ok.
+
+duplicate_enqueue_test(_) ->
+ Cid = {<<"duplicate_enqueue_test">>, self()},
+ {State1, [{monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)),
+ {State2, Effects2} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2),
+ {_State3, Effects3} = enq(3, 1, first, State2),
+ ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects3),
+ ok.
+
+return_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Cid2 = {<<"cid2">>, self()},
+ {State0, _} = enq(1, 1, msg, test_init(test)),
+ {State1, _} = check_auto(Cid, 2, State0),
+ {State2, _} = check_auto(Cid2, 3, State1),
+ {State3, _, _} = apply(meta(4), rabbit_fifo_v0:make_return(Cid, [0]), State2),
+ ?assertMatch(#{Cid := #consumer{checked_out = C}} when map_size(C) == 0,
+ State3#?STATE.consumers),
+ ?assertMatch(#{Cid2 := #consumer{checked_out = C2}} when map_size(C2) == 1,
+ State3#?STATE.consumers),
+ ok.
+
+return_dequeue_delivery_limit_test(_) ->
+ Init = init(#{name => test,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(test, utf8)),
+ release_cursor_interval => 0,
+ delivery_limit => 1}),
+ {State0, _} = enq(1, 1, msg, Init),
+
+ Cid = {<<"cid">>, self()},
+ Cid2 = {<<"cid2">>, self()},
+
+ {State1, {MsgId1, _}} = deq(2, Cid, unsettled, State0),
+ {State2, _, _} = apply(meta(4), rabbit_fifo_v0:make_return(Cid, [MsgId1]),
+ State1),
+
+ {State3, {MsgId2, _}} = deq(2, Cid2, unsettled, State2),
+ {State4, _, _} = apply(meta(4), rabbit_fifo_v0:make_return(Cid2, [MsgId2]),
+ State3),
+ ?assertMatch(#{num_messages := 0}, rabbit_fifo_v0:overview(State4)),
+ ok.
+
+return_non_existent_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State0, [_, _Inactive]} = enq(1, 1, second, test_init(test)),
+ % return non-existent
+ {_State2, _} = apply(meta(3), rabbit_fifo_v0:make_return(Cid, [99]), State0),
+ ok.
+
+return_checked_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State0, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active} | _ ]} = check_auto(Cid, 2, State0),
+ % returning immediately checks out the same message again
+ {_, ok, [{send_msg, _, {delivery, _, [{_, _}]}, _},
+ {aux, active}]} =
+ apply(meta(3), rabbit_fifo_v0:make_return(Cid, [MsgId]), State1),
+ ok.
+
+return_checked_out_limit_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Init = init(#{name => test,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(test, utf8)),
+ release_cursor_interval => 0,
+ delivery_limit => 1}),
+ {State0, [_, _]} = enq(1, 1, first, Init),
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active} | _ ]} = check_auto(Cid, 2, State0),
+ % returning immediately checks out the same message again
+ {State2, ok, [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _},
+ {aux, active}]} =
+ apply(meta(3), rabbit_fifo_v0:make_return(Cid, [MsgId]), State1),
+ {#?STATE{ra_indexes = RaIdxs}, ok, [_ReleaseEff]} =
+ apply(meta(4), rabbit_fifo_v0:make_return(Cid, [MsgId2]), State2),
+ ?assertEqual(0, rabbit_fifo_index:size(RaIdxs)),
+ ok.
+
+return_auto_checked_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State00, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State0, [_]} = enq(2, 2, second, State00),
+    % first an active then an inactive effect as the consumer took a message
+    % on but cannot take any more
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active},
+ {aux, inactive}
+ ]} = check_auto(Cid, 2, State0),
+ % return should include another delivery
+ {_State2, _, Effects} = apply(meta(3), rabbit_fifo_v0:make_return(Cid, [MsgId]), State1),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{_, {#{delivery_count := 1}, first}}]}, _},
+ Effects),
+ ok.
+
+cancelled_checkout_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State00, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State0, [_]} = enq(2, 2, second, State00),
+ {State1, _} = check_auto(Cid, 2, State0),
+ % cancelled checkout should not return pending messages to queue
+ {State2, _, _} = apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, cancel, #{}), State1),
+ ?assertEqual(1, maps:size(State2#?STATE.messages)),
+ ?assertEqual(0, lqueue:len(State2#?STATE.returns)),
+
+ {State3, {dequeue, empty}} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, settled}, #{}), State2),
+ %% settle
+ {State4, ok, _} =
+ apply(meta(4), rabbit_fifo_v0:make_settle(Cid, [0]), State3),
+
+ {_State, {dequeue, {_, {_, second}}, _}, _} =
+ apply(meta(5), rabbit_fifo_v0:make_checkout(Cid, {dequeue, settled}, #{}), State4),
+ ok.
+
+down_with_noproc_consumer_returns_unsettled_test(_) ->
+ Cid = {<<"down_consumer_returns_unsettled_test">>, self()},
+ {State0, [_, _]} = enq(1, 1, second, test_init(test)),
+ {State1, [{monitor, process, Pid} | _]} = check(Cid, 2, State0),
+ {State2, _, _} = apply(meta(3), {down, Pid, noproc}, State1),
+ {_State, Effects} = check(Cid, 4, State2),
+ ?ASSERT_EFF({monitor, process, _}, Effects),
+ ok.
+
+down_with_noconnection_marks_suspect_and_node_is_monitored_test(_) ->
+ Pid = spawn(fun() -> ok end),
+ Cid = {<<"down_with_noconnect">>, Pid},
+ Self = self(),
+ Node = node(Pid),
+ {State0, Effects0} = enq(1, 1, second, test_init(test)),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects0),
+ {State1, Effects1} = check_auto(Cid, 2, State0),
+ #consumer{credit = 0} = maps:get(Cid, State1#?STATE.consumers),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects1),
+ % monitor both enqueuer and consumer
+ % because we received a noconnection we now need to monitor the node
+ {State2a, _, _} = apply(meta(3), {down, Pid, noconnection}, State1),
+ #consumer{credit = 1,
+ checked_out = Ch,
+ status = suspected_down} = maps:get(Cid, State2a#?STATE.consumers),
+ ?assertEqual(#{}, Ch),
+ %% validate consumer has credit
+ {State2, _, Effects2} = apply(meta(3), {down, Self, noconnection}, State2a),
+ ?ASSERT_EFF({monitor, node, _}, Effects2),
+ ?assertNoEffect({demonitor, process, _}, Effects2),
+ % when the node comes up we need to retry the process monitors for the
+ % disconnected processes
+ {State3, _, Effects3} = apply(meta(3), {nodeup, Node}, State2),
+ #consumer{status = up} = maps:get(Cid, State3#?STATE.consumers),
+ % try to re-monitor the suspect processes
+ ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects3),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects3),
+ ok.
+
+down_with_noconnection_returns_unack_test(_) ->
+ Pid = spawn(fun() -> ok end),
+ Cid = {<<"down_with_noconnect">>, Pid},
+ {State0, _} = enq(1, 1, second, test_init(test)),
+ ?assertEqual(1, maps:size(State0#?STATE.messages)),
+ ?assertEqual(0, lqueue:len(State0#?STATE.returns)),
+ {State1, {_, _}} = deq(2, Cid, unsettled, State0),
+ ?assertEqual(0, maps:size(State1#?STATE.messages)),
+ ?assertEqual(0, lqueue:len(State1#?STATE.returns)),
+ {State2a, _, _} = apply(meta(3), {down, Pid, noconnection}, State1),
+ ?assertEqual(0, maps:size(State2a#?STATE.messages)),
+ ?assertEqual(1, lqueue:len(State2a#?STATE.returns)),
+ ?assertMatch(#consumer{checked_out = Ch,
+ status = suspected_down}
+ when map_size(Ch) == 0,
+ maps:get(Cid, State2a#?STATE.consumers)),
+ ok.
+
+down_with_noproc_enqueuer_is_cleaned_up_test(_) ->
+ State00 = test_init(test),
+ Pid = spawn(fun() -> ok end),
+ {State0, _, Effects0} = apply(meta(1), rabbit_fifo_v0:make_enqueue(Pid, 1, first), State00),
+ ?ASSERT_EFF({monitor, process, _}, Effects0),
+ {State1, _, _} = apply(meta(3), {down, Pid, noproc}, State0),
+ % ensure there are no enqueuers
+ ?assert(0 =:= maps:size(State1#?STATE.enqueuers)),
+ ok.
+
+discarded_message_without_dead_letter_handler_is_removed_test(_) ->
+ Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()},
+ {State0, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State1, Effects1} = check_n(Cid, 2, 10, State0),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects1),
+ {_State2, _, Effects2} = apply(meta(1),
+ rabbit_fifo_v0:make_discard(Cid, [0]), State1),
+ ?assertNoEffect({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects2),
+ ok.
+
+discarded_message_with_dead_letter_handler_emits_log_effect_test(_) ->
+ Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()},
+ State00 = init(#{name => test,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ dead_letter_handler =>
+ {somemod, somefun, [somearg]}}),
+ {State0, [_, _]} = enq(1, 1, first, State00),
+ {State1, Effects1} = check_n(Cid, 2, 10, State0),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects1),
+ {_State2, _, Effects2} = apply(meta(1), rabbit_fifo_v0:make_discard(Cid, [0]), State1),
+ % assert mod call effect with appended reason and message
+ ?ASSERT_EFF({log, _RaftIdxs, _}, Effects2),
+ ok.
+
+tick_test(_) ->
+ Cid = {<<"c">>, self()},
+ Cid2 = {<<"c2">>, self()},
+ {S0, _} = enq(1, 1, <<"fst">>, test_init(?FUNCTION_NAME)),
+ {S1, _} = enq(2, 2, <<"snd">>, S0),
+ {S2, {MsgId, _}} = deq(3, Cid, unsettled, S1),
+ {S3, {_, _}} = deq(4, Cid2, unsettled, S2),
+ {S4, _, _} = apply(meta(5), rabbit_fifo_v0:make_return(Cid, [MsgId]), S3),
+
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{},
+ {?FUNCTION_NAME, 1, 1, 2, 1, 3, 3},
+ [_Node]
+ ]}] = rabbit_fifo_v0:tick(1, S4),
+ ok.
+
+
+delivery_query_returns_deliveries_test(_) ->
+ Tag = atom_to_binary(?FUNCTION_NAME, utf8),
+ Cid = {Tag, self()},
+ Commands = [
+ rabbit_fifo_v0:make_checkout(Cid, {auto, 5, simple_prefetch}, #{}),
+ rabbit_fifo_v0:make_enqueue(self(), 1, one),
+ rabbit_fifo_v0:make_enqueue(self(), 2, two),
+ rabbit_fifo_v0:make_enqueue(self(), 3, tre),
+ rabbit_fifo_v0:make_enqueue(self(), 4, for)
+ ],
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ {State, _Effects} = run_log(test_init(help), Entries),
+ % 3 deliveries are returned
+ [{0, {_, one}}] = rabbit_fifo_v0:get_checked_out(Cid, 0, 0, State),
+ [_, _, _] = rabbit_fifo_v0:get_checked_out(Cid, 1, 3, State),
+ ok.
+
+pending_enqueue_is_enqueued_on_down_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Pid = self(),
+ {State0, _} = enq(1, 2, first, test_init(test)),
+ {State1, _, _} = apply(meta(2), {down, Pid, noproc}, State0),
+ {_State2, {dequeue, {0, {_, first}}, 0}, _} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, settled}, #{}), State1),
+ ok.
+
+duplicate_delivery_test(_) ->
+ {State0, _} = enq(1, 1, first, test_init(test)),
+ {#?STATE{ra_indexes = RaIdxs,
+ messages = Messages}, _} = enq(2, 1, first, State0),
+ ?assertEqual(1, rabbit_fifo_index:size(RaIdxs)),
+ ?assertEqual(1, maps:size(Messages)),
+ ok.
+
+state_enter_file_handle_leader_reservation_test(_) ->
+ S0 = init(#{name => the_name,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ become_leader_handler => {m, f, [a]}}),
+
+ Resource = {resource, <<"/">>, queue, <<"test">>},
+ Effects = rabbit_fifo_v0:state_enter(leader, S0),
+ ?assertEqual([
+ {mod_call, m, f, [a, the_name]},
+ {mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}
+ ], Effects),
+ ok.
+
+state_enter_file_handle_other_reservation_test(_) ->
+ S0 = init(#{name => the_name,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>)}),
+ Effects = rabbit_fifo_v0:state_enter(other, S0),
+ ?assertEqual([
+ {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}
+ ],
+ Effects),
+ ok.
+
+state_enter_monitors_and_notifications_test(_) ->
+ Oth = spawn(fun () -> ok end),
+ {State0, _} = enq(1, 1, first, test_init(test)),
+ Cid = {<<"adf">>, self()},
+ OthCid = {<<"oth">>, Oth},
+ {State1, _} = check(Cid, 2, State0),
+ {State, _} = check(OthCid, 3, State1),
+ Self = self(),
+ Effects = rabbit_fifo_v0:state_enter(leader, State),
+
+ %% monitor all enqueuers and consumers
+ [{monitor, process, Self},
+ {monitor, process, Oth}] =
+ lists:filter(fun ({monitor, process, _}) -> true;
+ (_) -> false
+ end, Effects),
+ [{send_msg, Self, leader_change, ra_event},
+ {send_msg, Oth, leader_change, ra_event}] =
+ lists:filter(fun ({send_msg, _, leader_change, ra_event}) -> true;
+ (_) -> false
+ end, Effects),
+ ?ASSERT_EFF({monitor, process, _}, Effects),
+ ok.
+
+purge_test(_) ->
+ Cid = {<<"purge_test">>, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, {purge, 1}, _} = apply(meta(2), rabbit_fifo_v0:make_purge(), State1),
+ {State3, _} = enq(3, 2, second, State2),
+ % get returns a reply value
+ {_State4, {dequeue, {0, {_, second}}, _}, [{monitor, _, _}]} =
+ apply(meta(4), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}), State3),
+ ok.
+
+purge_with_checkout_test(_) ->
+ Cid = {<<"purge_test">>, self()},
+ {State0, _} = check_auto(Cid, 1, test_init(?FUNCTION_NAME)),
+ {State1, _} = enq(2, 1, <<"first">>, State0),
+ {State2, _} = enq(3, 2, <<"second">>, State1),
+ %% assert message bytes are non zero
+ ?assert(State2#?STATE.msg_bytes_checkout > 0),
+ ?assert(State2#?STATE.msg_bytes_enqueue > 0),
+ {State3, {purge, 1}, _} = apply(meta(2), rabbit_fifo_v0:make_purge(), State2),
+    %% the checked out message is not purged
+    ?assert(State3#?STATE.msg_bytes_checkout > 0),
+ ?assertEqual(0, State3#?STATE.msg_bytes_enqueue),
+ ?assertEqual(1, rabbit_fifo_index:size(State3#?STATE.ra_indexes)),
+ #consumer{checked_out = Checked} = maps:get(Cid, State3#?STATE.consumers),
+ ?assertEqual(1, maps:size(Checked)),
+ ok.
+
+down_noproc_returns_checked_out_in_order_test(_) ->
+ S0 = test_init(?FUNCTION_NAME),
+ %% enqueue 100
+ S1 = lists:foldl(fun (Num, FS0) ->
+ {FS, _} = enq(Num, Num, Num, FS0),
+ FS
+ end, S0, lists:seq(1, 100)),
+ ?assertEqual(100, maps:size(S1#?STATE.messages)),
+ Cid = {<<"cid">>, self()},
+ {S2, _} = check(Cid, 101, 1000, S1),
+ #consumer{checked_out = Checked} = maps:get(Cid, S2#?STATE.consumers),
+ ?assertEqual(100, maps:size(Checked)),
+ %% simulate down
+ {S, _, _} = apply(meta(102), {down, self(), noproc}, S2),
+ Returns = lqueue:to_list(S#?STATE.returns),
+ ?assertEqual(100, length(Returns)),
+ ?assertEqual(0, maps:size(S#?STATE.consumers)),
+ %% validate returns are in order
+ ?assertEqual(lists:sort(Returns), Returns),
+ ok.
+
+down_noconnection_returns_checked_out_test(_) ->
+ S0 = test_init(?FUNCTION_NAME),
+ NumMsgs = 20,
+ S1 = lists:foldl(fun (Num, FS0) ->
+ {FS, _} = enq(Num, Num, Num, FS0),
+ FS
+ end, S0, lists:seq(1, NumMsgs)),
+ ?assertEqual(NumMsgs, maps:size(S1#?STATE.messages)),
+ Cid = {<<"cid">>, self()},
+ {S2, _} = check(Cid, 101, 1000, S1),
+ #consumer{checked_out = Checked} = maps:get(Cid, S2#?STATE.consumers),
+ ?assertEqual(NumMsgs, maps:size(Checked)),
+ %% simulate down
+ {S, _, _} = apply(meta(102), {down, self(), noconnection}, S2),
+ Returns = lqueue:to_list(S#?STATE.returns),
+ ?assertEqual(NumMsgs, length(Returns)),
+ ?assertMatch(#consumer{checked_out = Ch}
+ when map_size(Ch) == 0,
+ maps:get(Cid, S#?STATE.consumers)),
+ %% validate returns are in order
+ ?assertEqual(lists:sort(Returns), Returns),
+ ok.
+
+single_active_consumer_basic_get_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ ?assertEqual(single_active, State0#?STATE.cfg#cfg.consumer_strategy),
+ ?assertEqual(0, map_size(State0#?STATE.consumers)),
+ {State1, _} = enq(1, 1, first, State0),
+ {_State, {error, unsupported}} =
+ apply(meta(2), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State1),
+ ok.
+
+single_active_consumer_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ ?assertEqual(single_active, State0#?STATE.cfg#cfg.consumer_strategy),
+ ?assertEqual(0, map_size(State0#?STATE.consumers)),
+
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ meta(1),
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch},
+ #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ C3 = {<<"ctag3">>, self()},
+ C4 = {<<"ctag4">>, self()},
+
+ % the first registered consumer is the active one, the others are waiting
+ ?assertEqual(1, map_size(State1#?STATE.consumers)),
+ ?assertMatch(#{C1 := _}, State1#?STATE.consumers),
+ ?assertEqual(3, length(State1#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C2, 1, State1#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C3, 1, State1#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1, State1#?STATE.waiting_consumers)),
+
+ % cancelling a waiting consumer
+ {State2, _, Effects1} = apply(meta(2),
+ make_checkout(C3, cancel, #{}),
+ State1),
+ % the active consumer should still be in place
+ ?assertEqual(1, map_size(State2#?STATE.consumers)),
+ ?assertMatch(#{C1 := _}, State2#?STATE.consumers),
+ % the cancelled consumer has been removed from waiting consumers
+ ?assertEqual(2, length(State2#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C2, 1, State2#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1, State2#?STATE.waiting_consumers)),
+ % there are some effects to unregister the consumer
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C3, Effects1),
+
+ % cancelling the active consumer
+ {State3, _, Effects2} = apply(meta(3),
+ make_checkout(C1, cancel, #{}),
+ State2),
+ % the second registered consumer is now the active one
+ ?assertEqual(1, map_size(State3#?STATE.consumers)),
+ ?assertMatch(#{C2 := _}, State3#?STATE.consumers),
+ % the new active consumer is no longer in the waiting list
+ ?assertEqual(1, length(State3#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1,
+ State3#?STATE.waiting_consumers)),
+ %% should have a cancel consumer handler mod_call effect and
+ %% an active new consumer effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C1, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects2),
+
+ % cancelling the active consumer
+ {State4, _, Effects3} = apply(meta(4),
+ make_checkout(C2, cancel, #{}),
+ State3),
+ % the last waiting consumer became the active one
+ ?assertEqual(1, map_size(State4#?STATE.consumers)),
+ ?assertMatch(#{C4 := _}, State4#?STATE.consumers),
+ % the waiting consumer list is now empty
+ ?assertEqual(0, length(State4#?STATE.waiting_consumers)),
+ % there are some effects to unregister the consumer and
+ % to update the new active one (metrics)
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C2, Effects3),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects3),
+
+ % cancelling the last consumer
+ {State5, _, Effects4} = apply(meta(5),
+ make_checkout(C4, cancel, #{}),
+ State4),
+ % no active consumer anymore
+ ?assertEqual(0, map_size(State5#?STATE.consumers)),
+ % still nothing in the waiting list
+ ?assertEqual(0, length(State5#?STATE.waiting_consumers)),
+ % there is an effect to unregister the consumer + queue inactive effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, _}, Effects4),
+
+ ok.
+
+single_active_consumer_cancel_consumer_when_channel_is_down_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ [C1, C2, C3, C4] = Consumers =
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}],
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, ChannelId}, {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0, Consumers),
+
+ % the channel of the active consumer goes down
+ {State2, _, Effects} = apply(#{index => 2}, {down, Pid1, noproc}, State1),
+ % fell back to another consumer
+ ?assertEqual(1, map_size(State2#?STATE.consumers)),
+ % there are still waiting consumers
+ ?assertEqual(2, length(State2#?STATE.waiting_consumers)),
+ % effects to unregister the consumer and
+ % to update the new active one (metrics) are there
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C1, Effects),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects),
+
+ % the channel of the active consumer and a waiting consumer goes down
+ {State3, _, Effects2} = apply(#{index => 3}, {down, Pid2, noproc}, State2),
+ % fell back to another consumer
+ ?assertEqual(1, map_size(State3#?STATE.consumers)),
+ % no more waiting consumer
+ ?assertEqual(0, length(State3#?STATE.waiting_consumers)),
+ % effects to cancel both consumers of this channel + effect to update the new active one (metrics)
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C2, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C3, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects2),
+
+ % the last channel goes down
+ {State4, _, Effects3} = apply(#{index => 4}, {down, Pid3, doesnotmatter}, State3),
+ % no more consumers
+ ?assertEqual(0, map_size(State4#?STATE.consumers)),
+ ?assertEqual(0, length(State4#?STATE.waiting_consumers)),
+ % there is an effect to unregister the consumer + queue inactive effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C4, Effects3),
+
+ ok.
+
+single_active_returns_messages_on_noconnection_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1],
+ ConsumerIds = [{_, DownPid}] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1 = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+ {State2, _} = enq(4, 1, msg1, State1),
+ % simulate node goes down
+ {State3, _, _} = apply(meta(5), {down, DownPid, noconnection}, State2),
+    %% the message is returned and the consumer is back in the waiting list
+    %% with nothing checked out
+ ?assertMatch([_], lqueue:to_list(State3#?STATE.returns)),
+ ?assertMatch([{_, #consumer{checked_out = Checked}}]
+ when map_size(Checked) == 0,
+ State3#?STATE.waiting_consumers),
+
+ ok.
+
+single_active_consumer_replaces_consumer_when_down_noconnection_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1, n2, node()],
+ ConsumerIds = [C1 = {_, DownPid}, C2, _C3] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1a = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+
+ %% assert the consumer is up
+ ?assertMatch(#{C1 := #consumer{status = up}},
+ State1a#?STATE.consumers),
+
+ {State1, _} = enq(10, 1, msg, State1a),
+
+ % simulate node goes down
+ {State2, _, _} = apply(meta(5), {down, DownPid, noconnection}, State1),
+
+ %% assert a new consumer is in place and it is up
+ ?assertMatch([{C2, #consumer{status = up,
+ checked_out = Ch}}]
+ when map_size(Ch) == 1,
+ maps:to_list(State2#?STATE.consumers)),
+
+ %% the disconnected consumer has been returned to waiting
+ ?assert(lists:any(fun ({C,_}) -> C =:= C1 end,
+ State2#?STATE.waiting_consumers)),
+ ?assertEqual(2, length(State2#?STATE.waiting_consumers)),
+
+ % simulate node comes back up
+ {State3, _, _} = apply(#{index => 2}, {nodeup, node(DownPid)}, State2),
+
+ %% the consumer is still active and the same as before
+ ?assertMatch([{C2, #consumer{status = up}}],
+ maps:to_list(State3#?STATE.consumers)),
+    % the waiting consumers should no longer be suspected down
+ ?assertEqual(2, length(State3#?STATE.waiting_consumers)),
+ lists:foreach(fun({_, #consumer{status = Status}}) ->
+ ?assert(Status /= suspected_down)
+ end, State3#?STATE.waiting_consumers),
+ ok.
+
+single_active_consumer_all_disconnected_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1, n2],
+ ConsumerIds = [C1 = {_, C1Pid}, C2 = {_, C2Pid}] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1 = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+
+ %% assert the consumer is up
+ ?assertMatch(#{C1 := #consumer{status = up}}, State1#?STATE.consumers),
+
+ % simulate node goes down
+ {State2, _, _} = apply(meta(5), {down, C1Pid, noconnection}, State1),
+ %% assert the consumer fails over to the consumer on n2
+ ?assertMatch(#{C2 := #consumer{status = up}}, State2#?STATE.consumers),
+ {State3, _, _} = apply(meta(6), {down, C2Pid, noconnection}, State2),
+    %% assert there is no active consumer after both nodes are marked as down
+ ?assertMatch([], maps:to_list(State3#?STATE.consumers)),
+ %% n2 comes back
+ {State4, _, _} = apply(meta(7), {nodeup, node(C2Pid)}, State3),
+    %% ensure n2 is the active consumer as this node has been registered
+    %% as up again
+ ?assertMatch([{{<<"ctag_n2">>, _}, #consumer{status = up,
+ credit = 1}}],
+ maps:to_list(State4#?STATE.consumers)),
+ ok.
+
+single_active_consumer_state_enter_leader_include_waiting_consumers_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource =>
+ rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ Effects = rabbit_fifo_v0:state_enter(leader, State1),
+ %% 2 effects for each consumer process (channel process), 1 effect for the node,
+ %% 1 effect for file handle reservation
+ ?assertEqual(2 * 3 + 1 + 1, length(Effects)).
+
+single_active_consumer_state_enter_eol_include_waiting_consumers_test(_) ->
+ Resource = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => Resource,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ Effects = rabbit_fifo_v0:state_enter(eol, State1),
+ %% 1 effect for each consumer process (channel process),
+ %% 1 effect for file handle reservation
+ ?assertEqual(4, length(Effects)).
+
+query_consumers_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => false}),
+
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+ Consumers0 = State1#?STATE.consumers,
+ Consumer = maps:get({<<"ctag2">>, self()}, Consumers0),
+ Consumers1 = maps:put({<<"ctag2">>, self()},
+ Consumer#consumer{status = suspected_down}, Consumers0),
+ State2 = State1#?STATE{consumers = Consumers1},
+
+ ?assertEqual(4, rabbit_fifo_v0:query_consumer_count(State2)),
+ Consumers2 = rabbit_fifo_v0:query_consumers(State2),
+ ?assertEqual(4, maps:size(Consumers2)),
+ maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ case Tag of
+ <<"ctag2">> ->
+ ?assertNot(Active),
+ ?assertEqual(suspected_down, ActivityStatus);
+ _ ->
+ ?assert(Active),
+ ?assertEqual(up, ActivityStatus)
+ end
+ end, [], Consumers2).
+
+query_consumers_when_single_active_consumer_is_on_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+
+ ?assertEqual(4, rabbit_fifo_v0:query_consumer_count(State1)),
+ Consumers = rabbit_fifo_v0:query_consumers(State1),
+ ?assertEqual(4, maps:size(Consumers)),
+ maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ case Tag of
+ <<"ctag1">> ->
+ ?assert(Active),
+ ?assertEqual(single_active, ActivityStatus);
+ _ ->
+ ?assertNot(Active),
+ ?assertEqual(waiting, ActivityStatus)
+ end
+ end, [], Consumers).
+
+active_flag_updated_when_consumer_suspected_unsuspected_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => false}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} =
+ apply(
+ #{index => 1},
+ rabbit_fifo_v0:make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch},
+ #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ {State2, _, Effects2} = apply(#{index => 3}, {down, Pid1, noconnection}, State1),
+ % 1 effect to update the metrics of each consumer (they belong to the same node), 1 more effect to monitor the node
+ ?assertEqual(4 + 1, length(Effects2)),
+
+ {_, _, Effects3} = apply(#{index => 4}, {nodeup, node(self())}, State2),
+ % for each consumer: 1 effect to update the metrics, 1 effect to monitor the consumer PID
+ ?assertEqual(4 + 4, length(Effects3)).
+
+active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_consumer_is_on_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ {State2, _, Effects2} = apply(#{index => 2}, {down, Pid1, noconnection}, State1),
+ % one monitor and one consumer status update (deactivated)
+ ?assertEqual(3, length(Effects2)),
+
+ {_, _, Effects3} = apply(#{index => 3}, {nodeup, node(self())}, State2),
+ % for each consumer: 1 effect to monitor the consumer PID
+ ?assertEqual(5, length(Effects3)).
+
+single_active_cancelled_with_unacked_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ % adding some consumers
+ AddConsumer = fun(C, S0) ->
+ {S, _, _} = apply(
+ meta(1),
+ make_checkout(C,
+ {auto, 1, simple_prefetch},
+ #{}),
+ S0),
+ S
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [C1, C2]),
+
+ %% enqueue 2 messages
+ {State2, _Effects2} = enq(3, 1, msg1, State1),
+ {State3, _Effects3} = enq(4, 2, msg2, State2),
+    %% one message should be checked out to C1
+ %% cancel C1
+ {State4, _, _} = apply(meta(5),
+ make_checkout(C1, cancel, #{}),
+ State3),
+ %% C2 should be the active consumer
+ ?assertMatch(#{C2 := #consumer{status = up,
+ checked_out = #{0 := _}}},
+ State4#?STATE.consumers),
+ %% C1 should be a cancelled consumer
+ ?assertMatch(#{C1 := #consumer{status = cancelled,
+ lifetime = once,
+ checked_out = #{0 := _}}},
+ State4#?STATE.consumers),
+ ?assertMatch([], State4#?STATE.waiting_consumers),
+
+ %% Ack both messages
+ {State5, _Effects5} = settle(C1, 1, 0, State4),
+    %% after settling its last message, the cancelled C1 should be removed
+ {State6, _Effects6} = settle(C2, 2, 0, State5),
+
+ %% C2 should remain
+ ?assertMatch(#{C2 := #consumer{status = up}},
+ State6#?STATE.consumers),
+ %% C1 should be gone
+ ?assertNotMatch(#{C1 := _},
+ State6#?STATE.consumers),
+ ?assertMatch([], State6#?STATE.waiting_consumers),
+ ok.
+
+single_active_with_credited_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ % adding some consumers
+ AddConsumer = fun(C, S0) ->
+ {S, _, _} = apply(
+ meta(1),
+ make_checkout(C,
+ {auto, 0, credited},
+ #{}),
+ S0),
+ S
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [C1, C2]),
+
+ %% add some credit
+ C1Cred = rabbit_fifo_v0:make_credit(C1, 5, 0, false),
+ {State2, _, _Effects2} = apply(meta(3), C1Cred, State1),
+ C2Cred = rabbit_fifo_v0:make_credit(C2, 4, 0, false),
+ {State3, _} = apply(meta(4), C2Cred, State2),
+ %% both consumers should have credit
+ ?assertMatch(#{C1 := #consumer{credit = 5}},
+ State3#?STATE.consumers),
+ ?assertMatch([{C2, #consumer{credit = 4}}],
+ State3#?STATE.waiting_consumers),
+ ok.
+
+purge_nodes_test(_) ->
+ Node = purged@node,
+ ThisNode = node(),
+ EnqPid = test_util:fake_pid(Node),
+ EnqPid2 = test_util:fake_pid(node()),
+ ConPid = test_util:fake_pid(Node),
+ Cid = {<<"tag">>, ConPid},
+ % WaitingPid = test_util:fake_pid(Node),
+
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ single_active_consumer_on => false}),
+ {State1, _, _} = apply(meta(1),
+ rabbit_fifo_v0:make_enqueue(EnqPid, 1, msg1),
+ State0),
+ {State2, _, _} = apply(meta(2),
+ rabbit_fifo_v0:make_enqueue(EnqPid2, 1, msg2),
+ State1),
+ {State3, _} = check(Cid, 3, 1000, State2),
+ {State4, _, _} = apply(meta(4),
+ {down, EnqPid, noconnection},
+ State3),
+ ?assertMatch(
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{}, _Metrics,
+ [ThisNode, Node]
+ ]}] , rabbit_fifo_v0:tick(1, State4)),
+    %% purge the remote node, which should drop its enqueuers and consumers
+ {State, _, _} = apply(meta(5),
+ rabbit_fifo_v0:make_purge_nodes([Node]),
+ State4),
+
+    %% only the local enqueuer remains and there are no consumers left
+ ?assertMatch(#?STATE{enqueuers = Enqs} when map_size(Enqs) == 1, State),
+ ?assertMatch(#?STATE{consumers = Cons} when map_size(Cons) == 0, State),
+ ?assertMatch(
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{}, _Metrics,
+ [ThisNode]
+ ]}] , rabbit_fifo_v0:tick(1, State)),
+ ok.
+
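+%% Test helpers. meta/1 builds the Ra command metadata map handed to apply/3;
+%% for example meta(5) yields #{index => 5, term => 1, from => {Ref, self()}}.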
+meta(Idx) ->
+ #{index => Idx, term => 1,
+ from => {make_ref(), self()}}.
+
+enq(Idx, MsgSeq, Msg, State) ->
+ strip_reply(
+ apply(meta(Idx), rabbit_fifo_v0:make_enqueue(self(), MsgSeq, Msg), State)).
+
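+%% deq/4 applies a {dequeue, Settlement} checkout for Cid and returns the new
+%% state together with the {MsgId, Msg} pair extracted from the reply.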
+deq(Idx, Cid, Settlement, State0) ->
+ {State, {dequeue, {MsgId, Msg}, _}, _} =
+ apply(meta(Idx),
+ rabbit_fifo_v0:make_checkout(Cid, {dequeue, Settlement}, #{}),
+ State0),
+ {State, {MsgId, Msg}}.
+
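+%% The check* helpers register a consumer via a checkout command and strip the
+%% reply: check_n/4 and check/4 use auto credit with the given prefetch,
+%% check/3 a once lifetime and check_auto/3 an auto lifetime, both prefetch 1.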
+check_n(Cid, Idx, N, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo_v0:make_checkout(Cid, {auto, N, simple_prefetch}, #{}),
+ State)).
+
+check(Cid, Idx, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo_v0:make_checkout(Cid, {once, 1, simple_prefetch}, #{}),
+ State)).
+
+check_auto(Cid, Idx, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo_v0:make_checkout(Cid, {auto, 1, simple_prefetch}, #{}),
+ State)).
+
+check(Cid, Idx, Num, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo_v0:make_checkout(Cid, {auto, Num, simple_prefetch}, #{}),
+ State)).
+
+settle(Cid, Idx, MsgId, State) ->
+ strip_reply(apply(meta(Idx), rabbit_fifo_v0:make_settle(Cid, [MsgId]), State)).
+
+credit(Cid, Idx, Credit, DelCnt, Drain, State) ->
+ strip_reply(apply(meta(Idx), rabbit_fifo_v0:make_credit(Cid, Credit, DelCnt, Drain),
+ State)).
+
+strip_reply({State, _, Effects}) ->
+ {State, Effects}.
+
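+%% run_log/2 folds a list of {RaftIndex, Command} entries over the machine
+%% state, applying each command and accumulating any effects it emits.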
+run_log(InitState, Entries) ->
+ lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) ->
+ case apply(meta(Idx), E, Acc0) of
+ {Acc, _, Efx} when is_list(Efx) ->
+ {Acc, Efx0 ++ Efx};
+ {Acc, _, Efx} ->
+ {Acc, Efx0 ++ [Efx]};
+ {Acc, _} ->
+ {Acc, Efx0}
+ end
+ end, {InitState, []}, Entries).
+
+
+%% AUX Tests
+
+aux_test(_) ->
+ _ = ra_machine_ets:start_link(),
+ Aux0 = init_aux(aux_test),
+ MacState = init(#{name => aux_test,
+ queue_resource =>
+ rabbit_misc:r(<<"/">>, queue, <<"test">>)}),
+ ok = meck:new(ra_log, []),
+ Log = mock_log,
+ meck:expect(ra_log, last_index_term, fun (_) -> {0, 0} end),
+ {no_reply, Aux, mock_log} = handle_aux(leader, cast, active, Aux0,
+ Log, MacState),
+ {no_reply, _Aux, mock_log} = handle_aux(leader, cast, tick, Aux,
+ Log, MacState),
+ [X] = ets:lookup(rabbit_fifo_usage, aux_test),
+ meck:unload(),
+ ?assert(X > 0.0),
+ ok.
+
+%% Utility
+
+init(Conf) -> rabbit_fifo_v0:init(Conf).
+apply(Meta, Entry, State) -> rabbit_fifo_v0:apply(Meta, Entry, State).
+init_aux(Conf) -> rabbit_fifo_v0:init_aux(Conf).
+handle_aux(S, T, C, A, L, M) -> rabbit_fifo_v0:handle_aux(S, T, C, A, L, M).
+make_checkout(C, S, M) -> rabbit_fifo_v0:make_checkout(C, S, M).
diff --git a/deps/rabbit/test/rabbit_foo_protocol_connection_info.erl b/deps/rabbit/test/rabbit_foo_protocol_connection_info.erl
new file mode 100644
index 0000000000..937558aba8
--- /dev/null
+++ b/deps/rabbit/test/rabbit_foo_protocol_connection_info.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_foo_protocol_connection_info).
+
+%% Dummy module to test authentication context propagation
+
+%% API
+-export([additional_authn_params/4]).
+
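+%% Returns [{key1, Value}] when the connection info carries a variable_map
+%% with a binary "key1" entry; otherwise contributes no extra parameters.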
+additional_authn_params(_Creds, _VHost, _Pid, Infos) ->
+ case proplists:get_value(variable_map, Infos, undefined) of
+ VariableMap when is_map(VariableMap) ->
+ case maps:get(<<"key1">>, VariableMap, []) of
+ Value when is_binary(Value)->
+ [{key1, Value}];
+ [] ->
+ []
+ end;
+ _ ->
+ []
+ end.
diff --git a/deps/rabbit/test/rabbit_ha_test_consumer.erl b/deps/rabbit/test/rabbit_ha_test_consumer.erl
new file mode 100644
index 0000000000..2467e40028
--- /dev/null
+++ b/deps/rabbit/test/rabbit_ha_test_consumer.erl
@@ -0,0 +1,108 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_ha_test_consumer).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-export([await_response/1, create/5, start/6]).
+
+await_response(ConsumerPid) ->
+ case receive {ConsumerPid, Response} -> Response end of
+ {error, Reason} -> erlang:error(Reason);
+ ok -> ok
+ end.
+
+create(Channel, Queue, TestPid, CancelOnFailover, ExpectingMsgs) ->
+ ConsumerPid = spawn_link(?MODULE, start,
+ [TestPid, Channel, Queue, CancelOnFailover,
+ ExpectingMsgs + 1, ExpectingMsgs]),
+ amqp_channel:subscribe(
+ Channel, consume_method(Queue, CancelOnFailover), ConsumerPid),
+ ConsumerPid.
+
+start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
+ error_logger:info_msg("consumer ~p on ~p awaiting ~w messages "
+ "(lowest seen = ~w, cancel-on-failover = ~w)~n",
+ [self(), Channel, MsgsToConsume, LowestSeen,
+ CancelOnFailover]),
+ run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
+
+run(TestPid, _Channel, _Queue, _CancelOnFailover, _LowestSeen, 0) ->
+ consumer_reply(TestPid, ok);
+run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
+ receive
+ #'basic.consume_ok'{} ->
+ run(TestPid, Channel, Queue,
+ CancelOnFailover, LowestSeen, MsgsToConsume);
+ {Delivery = #'basic.deliver'{ redelivered = Redelivered },
+ #amqp_msg{payload = Payload}} ->
+ MsgNum = list_to_integer(binary_to_list(Payload)),
+
+ ack(Delivery, Channel),
+
+ %% we can receive any message we've already seen and,
+ %% because of the possibility of multiple requeuings, we
+ %% might see these messages in any order. If we are seeing
+ %% a message again, we don't decrement the MsgsToConsume
+ %% counter.
+ if
+ MsgNum + 1 == LowestSeen ->
+                      error_logger:info_msg("recording msg ~w, ~w left~n",
+ [MsgNum, MsgsToConsume]),
+ run(TestPid, Channel, Queue,
+ CancelOnFailover, MsgNum, MsgsToConsume - 1);
+ MsgNum >= LowestSeen ->
+ error_logger:info_msg(
+                      "consumer ~p on ~p ignoring redelivered msg ~p "
+                      "(lowest seen ~w)~n",
+ [self(), Channel, MsgNum, LowestSeen]),
+ true = Redelivered, %% ASSERTION
+ run(TestPid, Channel, Queue,
+ CancelOnFailover, LowestSeen, MsgsToConsume);
+ true ->
+ %% We received a message we haven't seen before,
+ %% but it is not the next message in the expected
+ %% sequence.
+ consumer_reply(TestPid,
+ {error, {unexpected_message, MsgNum}})
+ end;
+ #'basic.cancel'{} when CancelOnFailover ->
+ error_logger:info_msg("consumer ~p on ~p received basic.cancel: "
+ "resubscribing to ~p on ~p~n",
+ [self(), Channel, Queue, Channel]),
+ resubscribe(TestPid, Channel, Queue, CancelOnFailover,
+ LowestSeen, MsgsToConsume);
+ #'basic.cancel'{} ->
+ exit(cancel_received_without_cancel_on_failover)
+ end.
+
+%%
+%% Private API
+%%
+
+resubscribe(TestPid, Channel, Queue, CancelOnFailover, LowestSeen,
+ MsgsToConsume) ->
+ amqp_channel:subscribe(
+ Channel, consume_method(Queue, CancelOnFailover), self()),
+ ok = receive #'basic.consume_ok'{} -> ok
+ end,
+ error_logger:info_msg("re-subscripting consumer ~p on ~p complete "
+ "(received basic.consume_ok)",
+ [self(), Channel]),
+ start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
+
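+%% Build the basic.consume method; the x-cancel-on-ha-failover argument asks
+%% the broker to send basic.cancel to this consumer when the queue fails over,
+%% which run/6 above then handles by resubscribing.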
+consume_method(Queue, CancelOnFailover) ->
+ Args = [{<<"x-cancel-on-ha-failover">>, bool, CancelOnFailover}],
+ #'basic.consume'{queue = Queue,
+ arguments = Args}.
+
+ack(#'basic.deliver'{delivery_tag = DeliveryTag}, Channel) ->
+ amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
+ ok.
+
+consumer_reply(TestPid, Reply) ->
+ TestPid ! {self(), Reply}.
diff --git a/deps/rabbit/test/rabbit_ha_test_producer.erl b/deps/rabbit/test/rabbit_ha_test_producer.erl
new file mode 100644
index 0000000000..ed6969debe
--- /dev/null
+++ b/deps/rabbit/test/rabbit_ha_test_producer.erl
@@ -0,0 +1,131 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_ha_test_producer).
+
+-export([await_response/1, start/6, create/5, create/6]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+await_response(ProducerPid) ->
+ error_logger:info_msg("waiting for producer pid ~p~n", [ProducerPid]),
+ case receive {ProducerPid, Response} -> Response end of
+ ok -> ok;
+ {error, _} = Else -> exit(Else);
+ Else -> exit({weird_response, Else})
+ end.
+
+create(Channel, Queue, TestPid, Confirm, MsgsToSend) ->
+ create(Channel, Queue, TestPid, Confirm, MsgsToSend, acks).
+
+create(Channel, Queue, TestPid, Confirm, MsgsToSend, Mode) ->
+ AckNackMsgs = case Mode of
+ acks -> {ok, {error, received_nacks}};
+ nacks -> {{error, received_acks}, ok}
+ end,
+ ProducerPid = spawn_link(?MODULE, start, [Channel, Queue, TestPid,
+ Confirm, MsgsToSend, AckNackMsgs]),
+ receive
+ {ProducerPid, started} -> ProducerPid
+ end.
+
+start(Channel, Queue, TestPid, Confirm, MsgsToSend, AckNackMsgs) ->
+ ConfirmState =
+ case Confirm of
+ true -> amqp_channel:register_confirm_handler(Channel, self()),
+ #'confirm.select_ok'{} =
+ amqp_channel:call(Channel, #'confirm.select'{}),
+ gb_trees:empty();
+ false -> none
+ end,
+ TestPid ! {self(), started},
+ error_logger:info_msg("publishing ~w msgs on ~p~n", [MsgsToSend, Channel]),
+ producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend, AckNackMsgs).
+
+%%
+%% Private API
+%%
+
+producer(_Channel, _Queue, TestPid, none, 0, _AckNackMsgs) ->
+ TestPid ! {self(), ok};
+producer(Channel, _Queue, TestPid, ConfirmState, 0, {AckMsg, NackMsg}) ->
+ error_logger:info_msg("awaiting confirms on channel ~p~n", [Channel]),
+ Msg = case drain_confirms(none, ConfirmState) of
+ %% No acks or nacks
+ acks -> AckMsg;
+ nacks -> NackMsg;
+ mix -> {error, received_both_acks_and_nacks};
+ {Nacks, CS} -> {error, {missing_confirms, Nacks,
+ lists:sort(gb_trees:keys(CS))}}
+ end,
+ TestPid ! {self(), Msg};
+
+producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend, AckNackMsgs) ->
+ Method = #'basic.publish'{exchange = <<"">>,
+ routing_key = Queue,
+ mandatory = false,
+ immediate = false},
+
+ ConfirmState1 = maybe_record_confirm(ConfirmState, Channel, MsgsToSend),
+
+ amqp_channel:call(Channel, Method,
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = list_to_binary(
+ integer_to_list(MsgsToSend))}),
+
+ producer(Channel, Queue, TestPid, ConfirmState1, MsgsToSend - 1, AckNackMsgs).
+
+maybe_record_confirm(none, _, _) ->
+ none;
+maybe_record_confirm(ConfirmState, Channel, MsgsToSend) ->
+ SeqNo = amqp_channel:next_publish_seqno(Channel),
+ gb_trees:insert(SeqNo, MsgsToSend, ConfirmState).
+
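+%% Wait for publisher confirms until every recorded sequence number has been
+%% acked or nacked, classifying the overall outcome as acks, nacks or mix;
+%% give up after 60s and report whatever is still outstanding.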
+drain_confirms(Collected, ConfirmState) ->
+ case gb_trees:is_empty(ConfirmState) of
+ true -> Collected;
+ false -> receive
+ #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = IsMulti} ->
+ Collected1 = case Collected of
+ none -> acks;
+ acks -> acks;
+ nacks -> mix;
+ mix -> mix
+ end,
+ drain_confirms(Collected1,
+ delete_confirms(DeliveryTag, IsMulti,
+ ConfirmState));
+ #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = IsMulti} ->
+ Collected1 = case Collected of
+ none -> nacks;
+ nacks -> nacks;
+ acks -> mix;
+ mix -> mix
+ end,
+ drain_confirms(Collected1,
+ delete_confirms(DeliveryTag, IsMulti,
+ ConfirmState))
+ after
+ 60000 -> {Collected, ConfirmState}
+ end
+ end.
+
+delete_confirms(DeliveryTag, false, ConfirmState) ->
+ gb_trees:delete(DeliveryTag, ConfirmState);
+delete_confirms(DeliveryTag, true, ConfirmState) ->
+ multi_confirm(DeliveryTag, ConfirmState).
+
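+%% A confirm with multiple=true covers every sequence number up to and
+%% including DeliveryTag, so drop all such keys from the tree.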
+multi_confirm(DeliveryTag, ConfirmState) ->
+ case gb_trees:is_empty(ConfirmState) of
+ true -> ConfirmState;
+ false -> {Key, _, ConfirmState1} = gb_trees:take_smallest(ConfirmState),
+ case Key =< DeliveryTag of
+ true -> multi_confirm(DeliveryTag, ConfirmState1);
+ false -> ConfirmState
+ end
+ end.
diff --git a/deps/rabbit/test/rabbit_msg_record_SUITE.erl b/deps/rabbit/test/rabbit_msg_record_SUITE.erl
new file mode 100644
index 0000000000..a82ba7481d
--- /dev/null
+++ b/deps/rabbit/test/rabbit_msg_record_SUITE.erl
@@ -0,0 +1,213 @@
+-module(rabbit_msg_record_SUITE).
+
+-compile(export_all).
+
+-export([
+ ]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+all_tests() ->
+ [
+ ampq091_roundtrip,
+ message_id_ulong,
+ message_id_uuid,
+ message_id_binary,
+ message_id_large_binary,
+ message_id_large_string
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+ampq091_roundtrip(_Config) ->
+ Props = #'P_basic'{content_type = <<"text/plain">>,
+ content_encoding = <<"gzip">>,
+ headers = [{<<"x-stream-offset">>, long, 99},
+ {<<"x-string">>, longstr, <<"a string">>},
+ {<<"x-bool">>, bool, false},
+ {<<"x-unsignedbyte">>, unsignedbyte, 1},
+ {<<"x-unsignedshort">>, unsignedshort, 1},
+ {<<"x-unsignedint">>, unsignedint, 1},
+ {<<"x-signedint">>, signedint, 1},
+ {<<"x-timestamp">>, timestamp, 1},
+ {<<"x-double">>, double, 1.0},
+ {<<"x-float">>, float, 1.0},
+ {<<"x-binary">>, binary, <<"data">>}
+ ],
+ delivery_mode = 2,
+ priority = 99,
+ correlation_id = <<"corr">> ,
+ reply_to = <<"reply-to">>,
+ expiration = <<"1">>,
+ message_id = <<"msg-id">>,
+ timestamp = 99,
+ type = <<"45">>,
+ user_id = <<"banana">>,
+ app_id = <<"rmq">>
+ % cluster_id = <<"adf">>
+ },
+ Payload = [<<"data">>],
+ test_amqp091_roundtrip(Props, Payload),
+ test_amqp091_roundtrip(#'P_basic'{}, Payload),
+ ok.
+
+message_id_ulong(_Config) ->
+ Num = 9876789,
+ ULong = erlang:integer_to_binary(Num),
+ P = #'v1_0.properties'{message_id = {ulong, Num},
+ correlation_id = {ulong, Num}},
+ D = #'v1_0.data'{content = <<"data">>},
+ Bin = [amqp10_framing:encode_bin(P),
+ amqp10_framing:encode_bin(D)],
+ R = rabbit_msg_record:init(iolist_to_binary(Bin)),
+ {Props, _} = rabbit_msg_record:to_amqp091(R),
+ ?assertMatch(#'P_basic'{message_id = ULong,
+ correlation_id = ULong,
+ headers =
+ [
+ %% ordering shouldn't matter
+ {<<"x-correlation-id-type">>, longstr, <<"ulong">>},
+ {<<"x-message-id-type">>, longstr, <<"ulong">>}
+ ]},
+ Props),
+ ok.
+
+message_id_uuid(_Config) ->
+ %% fake a uuid
+ UUId = erlang:md5(term_to_binary(make_ref())),
+ TextUUId = rabbit_data_coercion:to_binary(rabbit_guid:to_string(UUId)),
+ P = #'v1_0.properties'{message_id = {uuid, UUId},
+ correlation_id = {uuid, UUId}},
+ D = #'v1_0.data'{content = <<"data">>},
+ Bin = [amqp10_framing:encode_bin(P),
+ amqp10_framing:encode_bin(D)],
+ R = rabbit_msg_record:init(iolist_to_binary(Bin)),
+ {Props, _} = rabbit_msg_record:to_amqp091(R),
+ ?assertMatch(#'P_basic'{message_id = TextUUId,
+ correlation_id = TextUUId,
+ headers =
+ [
+ %% ordering shouldn't matter
+ {<<"x-correlation-id-type">>, longstr, <<"uuid">>},
+ {<<"x-message-id-type">>, longstr, <<"uuid">>}
+ ]},
+ Props),
+ ok.
+
+message_id_binary(_Config) ->
+    %% a binary message id; it is surfaced base64-encoded in the 0.9.1 properties
+ Orig = <<"asdfasdf">>,
+ Text = base64:encode(Orig),
+ P = #'v1_0.properties'{message_id = {binary, Orig},
+ correlation_id = {binary, Orig}},
+ D = #'v1_0.data'{content = <<"data">>},
+ Bin = [amqp10_framing:encode_bin(P),
+ amqp10_framing:encode_bin(D)],
+ R = rabbit_msg_record:init(iolist_to_binary(Bin)),
+ {Props, _} = rabbit_msg_record:to_amqp091(R),
+ ?assertMatch(#'P_basic'{message_id = Text,
+ correlation_id = Text,
+ headers =
+ [
+ %% ordering shouldn't matter
+ {<<"x-correlation-id-type">>, longstr, <<"binary">>},
+ {<<"x-message-id-type">>, longstr, <<"binary">>}
+ ]},
+ Props),
+ ok.
+
+message_id_large_binary(_Config) ->
+ %% cannot fit in a shortstr
+ Orig = crypto:strong_rand_bytes(500),
+ P = #'v1_0.properties'{message_id = {binary, Orig},
+ correlation_id = {binary, Orig}},
+ D = #'v1_0.data'{content = <<"data">>},
+ Bin = [amqp10_framing:encode_bin(P),
+ amqp10_framing:encode_bin(D)],
+ R = rabbit_msg_record:init(iolist_to_binary(Bin)),
+ {Props, _} = rabbit_msg_record:to_amqp091(R),
+ ?assertMatch(#'P_basic'{message_id = undefined,
+ correlation_id = undefined,
+ headers =
+ [
+ %% ordering shouldn't matter
+ {<<"x-correlation-id">>, longstr, Orig},
+ {<<"x-message-id">>, longstr, Orig}
+ ]},
+ Props),
+ ok.
+
+message_id_large_string(_Config) ->
+ %% cannot fit in a shortstr
+ Orig = base64:encode(crypto:strong_rand_bytes(500)),
+ P = #'v1_0.properties'{message_id = {utf8, Orig},
+ correlation_id = {utf8, Orig}},
+ D = #'v1_0.data'{content = <<"data">>},
+ Bin = [amqp10_framing:encode_bin(P),
+ amqp10_framing:encode_bin(D)],
+ R = rabbit_msg_record:init(iolist_to_binary(Bin)),
+ {Props, _} = rabbit_msg_record:to_amqp091(R),
+ ?assertMatch(#'P_basic'{message_id = undefined,
+ correlation_id = undefined,
+ headers =
+ [
+ %% ordering shouldn't matter
+ {<<"x-correlation-id">>, longstr, Orig},
+ {<<"x-message-id">>, longstr, Orig}
+ ]},
+ Props),
+ ok.
+
+%% Utility
+
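+%% Convert AMQP 0.9.1 properties and payload to the message record, serialise
+%% and re-parse it, then assert the original properties and payload survive
+%% the round trip unchanged.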
+test_amqp091_roundtrip(Props, Payload) ->
+ MsgRecord0 = rabbit_msg_record:from_amqp091(Props, Payload),
+ MsgRecord = rabbit_msg_record:init(
+ iolist_to_binary(rabbit_msg_record:to_iodata(MsgRecord0))),
+ % meck:unload(),
+ {PropsOut, PayloadOut} = rabbit_msg_record:to_amqp091(MsgRecord),
+ ?assertEqual(Props, PropsOut),
+ ?assertEqual(iolist_to_binary(Payload),
+ iolist_to_binary(PayloadOut)),
+ ok.
+
+
diff --git a/deps/rabbit/test/rabbit_stream_queue_SUITE.erl b/deps/rabbit/test/rabbit_stream_queue_SUITE.erl
new file mode 100644
index 0000000000..a1055458db
--- /dev/null
+++ b/deps/rabbit/test/rabbit_stream_queue_SUITE.erl
@@ -0,0 +1,1610 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream_queue_SUITE).
+
+-include_lib("proper/include/proper.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+suite() ->
+ [{timetrap, 5 * 60000}].
+
+all() ->
+ [
+ {group, single_node},
+ {group, cluster_size_2},
+ {group, cluster_size_3},
+ {group, unclustered_size_3_1},
+ {group, unclustered_size_3_2},
+ {group, unclustered_size_3_3},
+ {group, cluster_size_3_1}
+ ].
+
+groups() ->
+ [
+ {single_node, [], [restart_single_node] ++ all_tests()},
+ {cluster_size_2, [], all_tests()},
+ {cluster_size_3, [], all_tests() ++
+ [delete_replica,
+ delete_down_replica,
+ delete_classic_replica,
+ delete_quorum_replica,
+ consume_from_replica,
+ leader_failover,
+ initial_cluster_size_one,
+ initial_cluster_size_two,
+ initial_cluster_size_one_policy,
+ leader_locator_client_local,
+ leader_locator_random,
+ leader_locator_least_leaders,
+ leader_locator_policy]},
+ {unclustered_size_3_1, [], [add_replica]},
+ {unclustered_size_3_2, [], [consume_without_local_replica]},
+ {unclustered_size_3_3, [], [grow_coordinator_cluster]},
+ {cluster_size_3_1, [], [shrink_coordinator_cluster]}
+ ].
+
+all_tests() ->
+ [
+ declare_args,
+ declare_max_age,
+ declare_invalid_properties,
+ declare_server_named,
+ declare_queue,
+ delete_queue,
+ publish,
+ publish_confirm,
+ recover,
+ consume_without_qos,
+ consume,
+ consume_offset,
+ consume_timestamp_offset,
+ consume_timestamp_last_offset,
+ basic_get,
+ consume_with_autoack,
+ consume_and_nack,
+ consume_and_ack,
+ consume_and_reject,
+ consume_from_last,
+ consume_from_next,
+ consume_from_default,
+ consume_credit,
+ consume_credit_out_of_order_ack,
+ consume_credit_multiple_ack,
+ basic_cancel,
+ max_length_bytes,
+ max_age,
+ invalid_policy,
+ max_age_policy,
+ max_segment_size_policy,
+ purge
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config0) ->
+ rabbit_ct_helpers:log_environment(),
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config0, {rabbit, [{stream_tick_interval, 1000},
+ {log, [{file, [{level, debug}]}]}]}),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ ClusterSize = case Group of
+ single_node -> 1;
+ cluster_size_2 -> 2;
+ cluster_size_3 -> 3;
+ cluster_size_3_1 -> 3;
+ unclustered_size_3_1 -> 3;
+ unclustered_size_3_2 -> 3;
+ unclustered_size_3_3 -> 3
+ end,
+ Clustered = case Group of
+ unclustered_size_3_1 -> false;
+ unclustered_size_3_2 -> false;
+ unclustered_size_3_3 -> false;
+ _ -> true
+ end,
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodes_count, ClusterSize},
+ {rmq_nodename_suffix, Group},
+ {tcp_ports_base},
+ {rmq_nodes_clustered, Clustered}]),
+ Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]),
+ Ret = rabbit_ct_helpers:run_steps(Config1b,
+ [fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps()),
+ case Ret of
+ {skip, _} ->
+ Ret;
+ Config2 ->
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, stream_queue),
+ case EnableFF of
+ ok ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config2, 0, application, set_env,
+ [rabbit, channel_tick_interval, 100]),
+ Config2;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end
+ end.
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Q = rabbit_data_coercion:to_binary(Testcase),
+ Config2 = rabbit_ct_helpers:set_config(Config1, [{queue_name, Q}]),
+ rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()).
+
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [{core_metrics_gc_interval, 100}]}).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []),
+ Config1 = rabbit_ct_helpers:run_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+declare_args(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-max-length">>, long, 2000}])),
+ assert_queue_type(Server, Q, rabbit_stream_queue).
+
+declare_max_age(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ declare(rabbit_ct_client_helpers:open_channel(Config, Server), Q,
+ [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-max-age">>, longstr, <<"1A">>}])),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-max-age">>, longstr, <<"1Y">>}])),
+ assert_queue_type(Server, Q, rabbit_stream_queue).
+
+declare_invalid_properties(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Q = ?config(queue_name, Config),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = Q,
+ auto_delete = true,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]})),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = Q,
+ exclusive = true,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]})),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = Q,
+ durable = false,
+ arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]})).
+
+declare_server_named(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ declare(rabbit_ct_client_helpers:open_channel(Config, Server),
+ <<"">>, [{<<"x-queue-type">>, longstr, <<"stream">>}])).
+
+declare_queue(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ %% Test declare an existing queue
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ?assertMatch([_], rpc:call(Server, supervisor, which_children,
+ [osiris_server_sup])),
+
+ %% Test declare an existing queue with different arguments
+ ?assertExit(_, declare(Ch, Q, [])).
+
+delete_queue(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})).
+
+add_replica(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+
+    %% Also try the add replica command on other queue types; it should fail.
+    %% We do it in the same test for efficiency, otherwise we would have to
+    %% start a new RabbitMQ cluster for every minor test case.
+ QClassic = <<Q/binary, "_classic">>,
+ QQuorum = <<Q/binary, "_quorum">>,
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ ?assertEqual({'queue.declare_ok', QClassic, 0, 0},
+ declare(Ch, QClassic, [{<<"x-queue-type">>, longstr, <<"classic">>}])),
+ ?assertEqual({'queue.declare_ok', QQuorum, 0, 0},
+ declare(Ch, QQuorum, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+    %% Server1 is not yet a member of the cluster, so adding a replica there fails
+ ?assertEqual({error, node_not_running},
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, Q, Server1])),
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, QClassic, Server1])),
+ ?assertEqual({error, quorum_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, QQuorum, Server1])),
+
+ ok = rabbit_control_helper:command(stop_app, Server1),
+ ok = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []),
+ rabbit_control_helper:command(start_app, Server1),
+ timer:sleep(1000),
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, QClassic, Server1])),
+ ?assertEqual({error, quorum_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, QQuorum, Server1])),
+ ?assertEqual(ok,
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, Q, Server1])),
+ %% replicas must be recorded on the state, and if we publish messages then they must
+ %% be stored on disk
+ check_leader_and_replicas(Config, Q, Server0, [Server1]),
+ %% And if we try again? Idempotent
+ ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, Q, Server1])),
+ %% Add another node
+ ok = rabbit_control_helper:command(stop_app, Server2),
+ ok = rabbit_control_helper:command(join_cluster, Server2, [atom_to_list(Server0)], []),
+ rabbit_control_helper:command(start_app, Server2),
+ ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, Q, Server2])),
+ check_leader_and_replicas(Config, Q, Server0, [Server1, Server2]).
+
+delete_replica(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ check_leader_and_replicas(Config, Q, Server0, [Server1, Server2]),
+    %% 'zen@rabbit' is not a member of the cluster, so deleting a replica there fails
+ ?assertEqual({error, node_not_running},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, 'zen@rabbit'])),
+ ?assertEqual(ok,
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])),
+ %% check it's gone
+ check_leader_and_replicas(Config, Q, Server0, [Server2]),
+ %% And if we try again? Idempotent
+ ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])),
+ %% Delete the last replica
+ ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server2])),
+ check_leader_and_replicas(Config, Q, Server0, []).
+
+grow_coordinator_cluster(Config) ->
+ [Server0, Server1, _Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ok = rabbit_control_helper:command(stop_app, Server1),
+ ok = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []),
+ rabbit_control_helper:command(start_app, Server1),
+
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ case rpc:call(Server0, ra, members, [{rabbit_stream_coordinator, Server0}]) of
+ {_, Members, _} ->
+ Nodes = lists:sort([N || {_, N} <- Members]),
+ lists:sort([Server0, Server1]) == Nodes;
+ _ ->
+ false
+ end
+ end, 60000).
+
+shrink_coordinator_cluster(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ok = rabbit_control_helper:command(stop_app, Server2),
+ ok = rabbit_control_helper:command(forget_cluster_node, Server0, [atom_to_list(Server2)], []),
+
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ case rpc:call(Server0, ra, members, [{rabbit_stream_coordinator, Server0}]) of
+ {_, Members, _} ->
+ Nodes = lists:sort([N || {_, N} <- Members]),
+ lists:sort([Server0, Server1]) == Nodes;
+ _ ->
+ false
+ end
+ end, 60000).
+
+delete_classic_replica(Config) ->
+ [Server0, Server1, _Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"classic">>}])),
+    %% replica deletion is rejected for classic queues, regardless of the node
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, 'zen@rabbit'])),
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])).
+
+delete_quorum_replica(Config) ->
+ [Server0, Server1, _Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ %% Not a member of the cluster, what would happen?
+ ?assertEqual({error, quorum_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, 'zen@rabbit'])),
+ ?assertEqual({error, quorum_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])).
+
+delete_down_replica(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ check_leader_and_replicas(Config, Q, Server0, [Server1, Server2]),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ ?assertEqual({error, node_not_running},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])),
+ %% check it isn't gone
+ check_leader_and_replicas(Config, Q, Server0, [Server1, Server2]),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ ?assertEqual(ok,
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])).
+
+publish(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ publish(Ch, Q),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]).
+
+publish_confirm(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]).
+
+restart_single_node(Config) ->
+ [Server] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ publish(Ch, Q),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]),
+
+ rabbit_control_helper:command(stop_app, Server),
+ rabbit_control_helper:command(start_app, Server),
+
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]),
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ publish(Ch1, Q),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"2">>, <<"0">>]]).
+
+recover(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ publish(Ch, Q),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]),
+
+ [rabbit_ct_broker_helpers:stop_node(Config, S) || S <- Servers],
+ [rabbit_ct_broker_helpers:start_node(Config, S) || S <- lists:reverse(Servers)],
+
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]),
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ publish(Ch1, Q),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"2">>, <<"0">>]]).
+
+consume_without_qos(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ?assertExit({{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, consumer_tag = <<"ctag">>},
+ self())).
+
+consume_without_local_replica(Config) ->
+ [Server0, Server1 | _] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ %% Add another node to the cluster, but it won't have a replica
+ ok = rabbit_control_helper:command(stop_app, Server1),
+ ok = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []),
+ rabbit_control_helper:command(start_app, Server1),
+ timer:sleep(1000),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ qos(Ch1, 10, false),
+ ?assertExit({{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:subscribe(Ch1, #'basic.consume'{queue = Q, consumer_tag = <<"ctag">>},
+ self())).
+
+consume(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, 0),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false}),
+ _ = amqp_channel:call(Ch1, #'basic.cancel'{consumer_tag = <<"ctag">>}),
+ ok = amqp_channel:close(Ch1),
+ ok
+ after 5000 ->
+ exit(timeout)
+ end.
+
+consume_offset(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ Payload = << <<"1">> || _ <- lists:seq(1, 500) >>,
+ [publish(Ch, Q, Payload) || _ <- lists:seq(1, 1000)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ run_proper(
+ fun () ->
+ ?FORALL(Offset, range(0, 999),
+ begin
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, Offset),
+ receive_batch(Ch1, Offset, 999),
+ receive
+ {_,
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-stream-offset">>, long, S}]}}}
+ when S < Offset ->
+ exit({unexpected_offset, S})
+ after 1000 ->
+ ok
+ end,
+ amqp_channel:call(Ch1, #'basic.cancel'{consumer_tag = <<"ctag">>}),
+ true
+ end)
+ end, [], 25).
+
+consume_timestamp_offset(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ Payload = <<"111">>,
+ [publish(Ch, Q, Payload) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+
+ Offset = erlang:system_time(millisecond) - 600000,
+ amqp_channel:subscribe(
+ Ch1,
+ #'basic.consume'{queue = Q,
+ no_ack = false,
+ consumer_tag = <<"ctag">>,
+ arguments = [{<<"x-stream-offset">>, timestamp, Offset}]},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end,
+
+ %% It has subscribed to a very old timestamp, so we will receive the whole stream
+ receive_batch(Ch1, 0, 99).
+
+consume_timestamp_last_offset(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ [publish(Ch, Q, <<"111">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+
+ %% Subscribe from now/future
+ Offset = erlang:system_time(millisecond) + 60000,
+ amqp_channel:subscribe(
+ Ch1,
+ #'basic.consume'{queue = Q,
+ no_ack = false,
+ consumer_tag = <<"ctag">>,
+ arguments = [{<<"x-stream-offset">>, timestamp, Offset}]},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end,
+
+ receive
+ {_,
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-stream-offset">>, long, S}]}}}
+ when S < 100 ->
+ exit({unexpected_offset, S})
+ after 1000 ->
+ ok
+ end,
+
+ %% Publish a few more
+ [publish(Ch, Q, <<"msg2">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ %% Yeah! we got them
+ receive_batch(Ch1, 100, 199).
+
+basic_get(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ amqp_channel:call(Ch, #'basic.get'{queue = Q})).
+
+consume_with_autoack(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+
+ ?assertExit(
+ {{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ subscribe(Ch1, Q, true, 0)).
+
+consume_and_nack(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, 0),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ ok = amqp_channel:cast(Ch1, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true}),
+                      %% Nack will fail with a 'not implemented' error. As it is a cast operation,
+                      %% we'll detect the connection/channel closure on the next call.
+ %% Let's try to redeclare and see what happens
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ declare(Ch1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}]))
+ after 10000 ->
+ exit(timeout)
+ end.
+
+basic_cancel(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, 0),
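+    %% Wait until exactly one consumer is recorded in the consumer_created ETS table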
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ 1 == length(rabbit_ct_broker_helpers:rpc(Config, Server, ets, tab2list,
+ [consumer_created]))
+ end, 30000),
+ receive
+ {#'basic.deliver'{}, _} ->
+ amqp_channel:call(Ch1, #'basic.cancel'{consumer_tag = <<"ctag">>}),
+ ?assertMatch([], rabbit_ct_broker_helpers:rpc(Config, Server, ets, tab2list, [consumer_created]))
+ after 10000 ->
+ exit(timeout)
+ end.
+
+consume_and_reject(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, 0),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ ok = amqp_channel:cast(Ch1, #'basic.reject'{delivery_tag = DeliveryTag,
+ requeue = true}),
+                      %% Reject will fail with a 'not implemented' error. As it is a cast operation,
+                      %% we'll detect the connection/channel closure on the next call.
+ %% Let's try to redeclare and see what happens
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ declare(Ch1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}]))
+ after 10000 ->
+ exit(timeout)
+ end.
+
+consume_and_ack(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, 0),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false}),
+ %% It will succeed as ack is now a credit operation. We should be
+ %% able to redeclare a queue (gen_server call op) as the channel
+ %% should still be open and declare is an idempotent operation
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]])
+ after 5000 ->
+ exit(timeout)
+ end.
+
+consume_from_last(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ [publish(Ch, Q, <<"msg1">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [committed_offset]]),
+
+    %% We'll receive data from the last committed offset; let's check that it is
+    %% not the first offset
+ CommittedOffset = proplists:get_value(committed_offset, Info),
+ ?assert(CommittedOffset > 0),
+
+    %% Subscribe from 'last', i.e. from the beginning of the last committed chunk
+ amqp_channel:subscribe(
+ Ch1, #'basic.consume'{queue = Q,
+ no_ack = false,
+ consumer_tag = <<"ctag">>,
+ arguments = [{<<"x-stream-offset">>, longstr, <<"last">>}]},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end,
+
+ %% And receive the messages from the last committed offset to the end of the stream
+ receive_batch(Ch1, CommittedOffset, 99),
+
+ %% Publish a few more
+ [publish(Ch, Q, <<"msg2">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ %% Yeah! we got them
+ receive_batch(Ch1, 100, 199).
+
+consume_from_next(Config) ->
+ consume_from_next(Config, [{<<"x-stream-offset">>, longstr, <<"next">>}]).
+
+consume_from_default(Config) ->
+ consume_from_next(Config, []).
+
+consume_from_next(Config, Args) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ [publish(Ch, Q, <<"msg1">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [committed_offset]]),
+
+    %% The published messages should have been committed by now; check that the
+    %% committed offset has moved past the first offset
+ CommittedOffset = proplists:get_value(committed_offset, Info),
+ ?assert(CommittedOffset > 0),
+
+ %% If the offset is not provided, we're subscribing to the tail of the stream
+ amqp_channel:subscribe(
+ Ch1, #'basic.consume'{queue = Q,
+ no_ack = false,
+ consumer_tag = <<"ctag">>,
+ arguments = Args},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end,
+
+ %% Publish a few more
+ [publish(Ch, Q, <<"msg2">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ %% Yeah! we got them
+ receive_batch(Ch1, 100, 199).
+
+consume_from_replica(Config) ->
+ [Server1, Server2 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch1, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch1, self()),
+ [publish(Ch1, Q, <<"msg1">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch1, 5),
+
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server2),
+ qos(Ch2, 10, false),
+
+ subscribe(Ch2, Q, false, 0),
+ receive_batch(Ch2, 0, 99).
+
+consume_credit(Config) ->
+    %% Because osiris delivers one chunk per read and we don't want to buffer
+    %% messages in the broker (to avoid memory penalties), the credit value won't
+    %% be strict - we allow it to go negative.
+    %% We can test that after receiving a chunk, no more messages are delivered until
+    %% the credit goes back to a positive value.
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ %% Let's publish a big batch, to ensure we have more than a chunk available
+ NumMsgs = 100,
+ [publish(Ch, Q, <<"msg1">>) || _ <- lists:seq(1, NumMsgs)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+
+ %% Let's subscribe with a small credit, easier to test
+ Credit = 2,
+ qos(Ch1, Credit, false),
+ subscribe(Ch1, Q, false, 0),
+
+ %% Receive everything
+ DeliveryTags = receive_batch(),
+
+ %% We receive at least the given credit as we know there are 100 messages in the queue
+ ?assert(length(DeliveryTags) >= Credit),
+
+ %% Let's ack as many messages as we can while avoiding a positive credit for new deliveries
+ {ToAck, Pending} = lists:split(length(DeliveryTags) - Credit, DeliveryTags),
+
+ [ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false})
+ || DeliveryTag <- ToAck],
+
+ %% Nothing here, this is good
+ receive
+ {#'basic.deliver'{}, _} ->
+ exit(unexpected_delivery)
+ after 1000 ->
+ ok
+ end,
+
+ %% Let's ack one more, we should receive a new chunk
+ ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = hd(Pending),
+ multiple = false}),
+
+ %% Yeah, here is the new chunk!
+ receive
+ {#'basic.deliver'{}, _} ->
+ ok
+ after 5000 ->
+ exit(timeout)
+ end.
+
+consume_credit_out_of_order_ack(Config) ->
+ %% Like consume_credit but acknowledging the messages out of order.
+ %% We want to ensure it doesn't behave like multiple, that is if we have
+ %% credit 2 and received 10 messages, sending the ack for the message id
+ %% number 10 should only increase credit by 1.
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ %% Let's publish a big batch, to ensure we have more than a chunk available
+ NumMsgs = 100,
+ [publish(Ch, Q, <<"msg1">>) || _ <- lists:seq(1, NumMsgs)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+
+ %% Let's subscribe with a small credit, easier to test
+ Credit = 2,
+ qos(Ch1, Credit, false),
+ subscribe(Ch1, Q, false, 0),
+
+ %% ******* This is the difference with consume_credit
+ %% Receive everything, let's reverse the delivery tags here so we ack out of order
+ DeliveryTags = lists:reverse(receive_batch()),
+
+ %% We receive at least the given credit as we know there are 100 messages in the queue
+ ?assert(length(DeliveryTags) >= Credit),
+
+ %% Let's ack as many messages as we can while avoiding a positive credit for new deliveries
+ {ToAck, Pending} = lists:split(length(DeliveryTags) - Credit, DeliveryTags),
+
+ [ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false})
+ || DeliveryTag <- ToAck],
+
+ %% Nothing here, this is good
+ receive
+ {#'basic.deliver'{}, _} ->
+ exit(unexpected_delivery)
+ after 1000 ->
+ ok
+ end,
+
+ %% Let's ack one more, we should receive a new chunk
+ ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = hd(Pending),
+ multiple = false}),
+
+ %% Yeah, here is the new chunk!
+ receive
+ {#'basic.deliver'{}, _} ->
+ ok
+ after 5000 ->
+ exit(timeout)
+ end.
+
+consume_credit_multiple_ack(Config) ->
+    %% Like consume_credit but acknowledging all outstanding messages at once
+    %% with a single 'multiple = true' ack on the last delivery tag. That should
+    %% replenish the credit for every message covered by the ack and trigger the
+    %% delivery of a new chunk.
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ %% Let's publish a big batch, to ensure we have more than a chunk available
+ NumMsgs = 100,
+ [publish(Ch, Q, <<"msg1">>) || _ <- lists:seq(1, NumMsgs)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+
+ %% Let's subscribe with a small credit, easier to test
+ Credit = 2,
+ qos(Ch1, Credit, false),
+ subscribe(Ch1, Q, false, 0),
+
+    %% ******* This is the difference with consume_credit
+    %% Receive everything and keep only the last delivery tag so we can ack with 'multiple = true'
+ DeliveryTag = lists:last(receive_batch()),
+
+ ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = true}),
+
+ %% Yeah, here is the new chunk!
+ receive
+ {#'basic.deliver'{}, _} ->
+ ok
+ after 5000 ->
+ exit(timeout)
+ end.
+
+max_length_bytes(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-max-length-bytes">>, long, 500},
+ {<<"x-max-segment-size">>, long, 250}])),
+
+ Payload = << <<"1">> || _ <- lists:seq(1, 500) >>,
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ [publish(Ch, Q, Payload) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ %% We don't yet have reliable metrics, as the committed offset doesn't work
+ %% as a counter once we start applying retention policies.
+    %% Let's consume what is left and check that it is fewer than the number of published messages
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 100, false),
+ subscribe(Ch1, Q, false, 0),
+
+ ?assert(length(receive_batch()) < 100).
+
+max_age(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-max-age">>, longstr, <<"10s">>},
+ {<<"x-max-segment-size">>, long, 250}])),
+
+ Payload = << <<"1">> || _ <- lists:seq(1, 500) >>,
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ [publish(Ch, Q, Payload) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ timer:sleep(10000),
+
+ %% Let's publish again so the new segments will trigger the retention policy
+ [publish(Ch, Q, Payload) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ timer:sleep(5000),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 200, false),
+ subscribe(Ch1, Q, false, 0),
+ ?assertEqual(100, length(receive_batch())).
+
+leader_failover(Config) ->
+ [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch1, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch1, self()),
+ [publish(Ch1, Q, <<"msg">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch1, 5),
+
+ check_leader_and_replicas(Config, Q, Server1, [Server2, Server3]),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ timer:sleep(30000),
+
+ [Info] = lists:filter(
+ fun(Props) ->
+ QName = rabbit_misc:r(<<"/">>, queue, Q),
+ lists:member({name, QName}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader, members]])),
+ NewLeader = proplists:get_value(leader, Info),
+ ?assert(NewLeader =/= Server1),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1).
+
+initial_cluster_size_one(Config) ->
+ [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-initial-cluster-size">>, long, 1}])),
+ check_leader_and_replicas(Config, Q, Server1, []),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})).
+
+initial_cluster_size_two(Config) ->
+ [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-initial-cluster-size">>, long, 2}])),
+
+ [Info] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader, members]])),
+ ?assertEqual(Server1, proplists:get_value(leader, Info)),
+ ?assertEqual(1, length(proplists:get_value(members, Info))),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})).
+
+initial_cluster_size_one_policy(Config) ->
+ [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"cluster-size">>, <<"initial_cluster_size_one_policy">>, <<"queues">>,
+ [{<<"initial-cluster-size">>, 1}]),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-initial-cluster-size">>, long, 1}])),
+ check_leader_and_replicas(Config, Q, Server1, []),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
+
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"cluster-size">>).
+
+leader_locator_client_local(Config) ->
+ [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+
+ [Info] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ ?assertEqual(Server1, proplists:get_value(leader, Info)),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
+
+ %% Try second node
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server2),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch2, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+
+ [Info2] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ ?assertEqual(Server2, proplists:get_value(leader, Info2)),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch2, #'queue.delete'{queue = Q})),
+
+ %% Try third node
+ Ch3 = rabbit_ct_client_helpers:open_channel(Config, Server3),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch3, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+
+ [Info3] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ ?assertEqual(Server3, proplists:get_value(leader, Info3)),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch3, #'queue.delete'{queue = Q})).
+
+leader_locator_random(Config) ->
+ [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"random">>}])),
+
+ [Info] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ Leader = proplists:get_value(leader, Info),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
+
+ repeat_until(
+ fun() ->
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"random">>}])),
+
+ [Info2] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ Leader2 = proplists:get_value(leader, Info2),
+
+ Leader =/= Leader2
+ end, 10).
+
+leader_locator_least_leaders(Config) ->
+ [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ ?assertEqual({'queue.declare_ok', Q1, 0, 0},
+ declare(Ch, Q1, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+ ?assertEqual({'queue.declare_ok', Q2, 0, 0},
+ declare(Ch, Q2, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"least-leaders">>}])),
+
+ [Info] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ Leader = proplists:get_value(leader, Info),
+
+ ?assert(lists:member(Leader, [Server2, Server3])).
+
+leader_locator_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"leader-locator">>, <<"leader_locator_.*">>, <<"queues">>,
+ [{<<"queue-leader-locator">>, <<"random">>}]),
+
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [policy, operator_policy,
+ effective_policy_definition,
+ name, leader]]),
+
+ ?assertEqual(<<"leader-locator">>, proplists:get_value(policy, Info)),
+ ?assertEqual('', proplists:get_value(operator_policy, Info)),
+ ?assertEqual([{<<"queue-leader-locator">>, <<"random">>}],
+ proplists:get_value(effective_policy_definition, Info)),
+
+ Leader = proplists:get_value(leader, Info),
+
+ repeat_until(
+ fun() ->
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ [Info2] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ Leader2 = proplists:get_value(leader, Info2),
+ Leader =/= Leader2
+ end, 10),
+
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"leader-locator">>).
+
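+%% Calls Fun up to N times until it returns true; fails the test case otherwise.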
+repeat_until(_, 0) ->
+    ct:fail("Condition did not materialize in the expected number of attempts");
+repeat_until(Fun, N) ->
+ case Fun() of
+ true -> ok;
+ false -> repeat_until(Fun, N - 1)
+ end.
+
+invalid_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"ha">>, <<"invalid_policy.*">>, <<"queues">>,
+ [{<<"ha-mode">>, <<"all">>}]),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"ttl">>, <<"invalid_policy.*">>, <<"queues">>,
+ [{<<"message-ttl">>, 5}]),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [policy, operator_policy,
+ effective_policy_definition]]),
+
+ ?assertEqual('', proplists:get_value(policy, Info)),
+ ?assertEqual('', proplists:get_value(operator_policy, Info)),
+ ?assertEqual([], proplists:get_value(effective_policy_definition, Info)),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ttl">>).
+
+max_age_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"age">>, <<"max_age_policy.*">>, <<"queues">>,
+ [{<<"max-age">>, <<"1Y">>}]),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [policy, operator_policy,
+ effective_policy_definition]]),
+
+ ?assertEqual(<<"age">>, proplists:get_value(policy, Info)),
+ ?assertEqual('', proplists:get_value(operator_policy, Info)),
+ ?assertEqual([{<<"max-age">>, <<"1Y">>}],
+ proplists:get_value(effective_policy_definition, Info)),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"age">>).
+
+max_segment_size_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"segment">>, <<"max_segment_size.*">>, <<"queues">>,
+ [{<<"max-segment-size">>, 5000}]),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [policy, operator_policy,
+ effective_policy_definition]]),
+
+ ?assertEqual(<<"segment">>, proplists:get_value(policy, Info)),
+ ?assertEqual('', proplists:get_value(operator_policy, Info)),
+ ?assertEqual([{<<"max-segment-size">>, 5000}],
+ proplists:get_value(effective_policy_definition, Info)),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"segment">>).
+
+purge(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ amqp_channel:call(Ch, #'queue.purge'{queue = Q})).
+
+%%----------------------------------------------------------------------------
+
+delete_queues() ->
+ [{ok, _} = rabbit_amqqueue:delete(Q, false, false, <<"dummy">>)
+ || Q <- rabbit_amqqueue:list()].
+
+declare(Ch, Q) ->
+ declare(Ch, Q, []).
+
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ auto_delete = false,
+ arguments = Args}).
+assert_queue_type(Server, Q, Expected) ->
+ Actual = get_queue_type(Server, Q),
+ Expected = Actual.
+
+get_queue_type(Server, Q0) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, Q0),
+ {ok, Q1} = rpc:call(Server, rabbit_amqqueue, lookup, [QNameRes]),
+ amqqueue:get_type(Q1).
+
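+%% Asserts that node 0 reports the expected leader and the expected set of
+%% replica members (order-insensitive) for the given stream queue.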
+check_leader_and_replicas(Config, Name, Leader, Replicas0) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, Name),
+ [Info] = lists:filter(
+ fun(Props) ->
+ lists:member({name, QNameRes}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader, members]])),
+ ?assertEqual(Leader, proplists:get_value(leader, Info)),
+ Replicas = lists:sort(Replicas0),
+ ?assertEqual(Replicas, lists:sort(proplists:get_value(members, Info))).
+
+publish(Ch, Queue) ->
+ publish(Ch, Queue, <<"msg">>).
+
+publish(Ch, Queue, Msg) ->
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Msg}).
+
+subscribe(Ch, Queue, NoAck, Offset) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+ no_ack = NoAck,
+ consumer_tag = <<"ctag">>,
+ arguments = [{<<"x-stream-offset">>, long, Offset}]},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+qos(Ch, Prefetch, Global) ->
+ ?assertMatch(#'basic.qos_ok'{},
+ amqp_channel:call(Ch, #'basic.qos'{global = Global,
+ prefetch_count = Prefetch})).
+
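+%% Consumes and acks messages whose x-stream-offset headers run consecutively
+%% from N up to M; exits if a lower offset is delivered or the expected offset
+%% does not arrive in time.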
+receive_batch(Ch, N, N) ->
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag},
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-stream-offset">>, long, N}]}}} ->
+ ok = amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false})
+ after 60000 ->
+ exit({missing_offset, N})
+ end;
+receive_batch(Ch, N, M) ->
+ receive
+ {_,
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-stream-offset">>, long, S}]}}}
+ when S < N ->
+ exit({unexpected_offset, S});
+ {#'basic.deliver'{delivery_tag = DeliveryTag},
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-stream-offset">>, long, N}]}}} ->
+ ok = amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false}),
+ receive_batch(Ch, N + 1, M)
+ after 60000 ->
+ exit({missing_offset, N})
+ end.
+
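+%% Collects the delivery tags of all deliveries received until the channel has
+%% been silent for 5 seconds, returned in arrival order.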
+receive_batch() ->
+ receive_batch([]).
+
+receive_batch(Acc) ->
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ receive_batch([DeliveryTag | Acc])
+ after 5000 ->
+ lists:reverse(Acc)
+ end.
+
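+%% Builds the property with erlang:apply(Fun, Args), runs it NumTests times and
+%% asserts that no counterexample is found.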
+run_proper(Fun, Args, NumTests) ->
+ ?assertEqual(
+ true,
+ proper:counterexample(
+ erlang:apply(Fun, Args),
+ [{numtests, NumTests},
+ {on_output, fun(".", _) -> ok; % don't print the '.'s on new lines
+ (F, A) -> ct:pal(?LOW_IMPORTANCE, F, A)
+ end}])).
diff --git a/deps/rabbit/test/rabbitmq-env.bats b/deps/rabbit/test/rabbitmq-env.bats
new file mode 100644
index 0000000000..4a016960c5
--- /dev/null
+++ b/deps/rabbit/test/rabbitmq-env.bats
@@ -0,0 +1,128 @@
+#!/usr/bin/env bats
+
+export RABBITMQ_SCRIPTS_DIR="$BATS_TEST_DIRNAME/../scripts"
+
+setup() {
+ export RABBITMQ_CONF_ENV_FILE="$BATS_TMPDIR/rabbitmq-env.$BATS_TEST_NAME.conf"
+}
+
+@test "default Erlang scheduler bind type" {
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+ echo $RABBITMQ_SCHEDULER_BIND_TYPE
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +stbt db ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +stbt db "* ]]
+}
+
+@test "can configure Erlang scheduler bind type via conf file" {
+ echo 'SCHEDULER_BIND_TYPE=u' > "$RABBITMQ_CONF_ENV_FILE"
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +stbt u ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +stbt u "* ]]
+}
+
+@test "can configure Erlang scheduler bind type via env" {
+ RABBITMQ_SCHEDULER_BIND_TYPE=tnnps source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +stbt tnnps ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +stbt tnnps "* ]]
+}
+
+@test "Erlang scheduler bind type env takes precedence over conf file" {
+ echo 'SCHEDULER_BIND_TYPE=s' > "$RABBITMQ_CONF_ENV_FILE"
+ RABBITMQ_SCHEDULER_BIND_TYPE=nnps source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +stbt nnps ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +stbt nnps "* ]]
+}
+
+@test "default Erlang distribution buffer size" {
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +zdbbl 128000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +zdbbl 128000 "* ]]
+}
+
+@test "can configure Erlang distribution buffer size via conf file" {
+ echo 'DISTRIBUTION_BUFFER_SIZE=123123' > "$RABBITMQ_CONF_ENV_FILE"
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +zdbbl 123123 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +zdbbl 123123 "* ]]
+}
+
+@test "can configure Erlang distribution buffer size via env" {
+ RABBITMQ_DISTRIBUTION_BUFFER_SIZE=2000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +zdbbl 2000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +zdbbl 2000000 "* ]]
+}
+
+@test "Erlang distribution buffer size env takes precedence over conf file" {
+ echo 'DISTRIBUTION_BUFFER_SIZE=3000000' > "$RABBITMQ_CONF_ENV_FILE"
+ RABBITMQ_DISTRIBUTION_BUFFER_SIZE=4000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +zdbbl 4000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +zdbbl 4000000 "* ]]
+}
+
+@test "default Erlang maximum number of processes" {
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +P 1048576 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +P 1048576 "* ]]
+}
+
+@test "can configure Erlang maximum number of processes via conf file" {
+ echo 'MAX_NUMBER_OF_PROCESSES=2000000' > "$RABBITMQ_CONF_ENV_FILE"
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +P 2000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +P 2000000 "* ]]
+}
+
+@test "can configure Erlang maximum number of processes via env" {
+ RABBITMQ_MAX_NUMBER_OF_PROCESSES=3000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +P 3000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +P 3000000 "* ]]
+}
+
+@test "Erlang maximum number of processes env takes precedence over conf file" {
+ echo 'MAX_NUMBER_OF_PROCESSES=4000000' > "$RABBITMQ_CONF_ENV_FILE"
+ RABBITMQ_MAX_NUMBER_OF_PROCESSES=5000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +P 5000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +P 5000000 "* ]]
+}
+
+@test "default Erlang maximum number of atoms" {
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +t 5000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +t 5000000 "* ]]
+}
+
+@test "can configure Erlang maximum number of atoms via conf file" {
+ echo 'MAX_NUMBER_OF_ATOMS=1000000' > "$RABBITMQ_CONF_ENV_FILE"
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +t 1000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +t 1000000 "* ]]
+}
+
+@test "can configure Erlang maximum number of atoms via env" {
+ RABBITMQ_MAX_NUMBER_OF_ATOMS=2000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +t 2000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +t 2000000 "* ]]
+}
+
+@test "Erlang maximum number of atoms env takes precedence over conf file" {
+ echo 'MAX_NUMBER_OF_ATOMS=3000000' > "$RABBITMQ_CONF_ENV_FILE"
+ RABBITMQ_MAX_NUMBER_OF_ATOMS=4000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +t 4000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +t 4000000 "* ]]
+}
diff --git a/deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl b/deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl
new file mode 100644
index 0000000000..bf5e9ee79e
--- /dev/null
+++ b/deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl
@@ -0,0 +1,139 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbitmq_queues_cli_integration_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+groups() ->
+ [
+ {tests, [], [
+ shrink,
+ grow,
+ grow_invalid_node_filtered
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ case os:getenv("SECONDARY_UMBRELLA") of
+ false ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config);
+ _ ->
+ {skip, "growing and shrinking cannot be done in mixed mode"}
+ end.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(tests, Config0) ->
+ NumNodes = 3,
+ Config1 = rabbit_ct_helpers:set_config(
+ Config0, [{rmq_nodes_count, NumNodes},
+ {rmq_nodes_clustered, true}]),
+ Config2 = rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config2, quorum_queue) of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_group(tests, Config2),
+ Skip
+ end.
+
+end_per_group(tests, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config0) ->
+ rabbit_ct_helpers:ensure_rabbitmq_queues_cmd(
+ rabbit_ct_helpers:testcase_started(Config0, Testcase)).
+
+end_per_testcase(Testcase, Config0) ->
+ rabbit_ct_helpers:testcase_finished(Config0, Testcase).
+
+shrink(Config) ->
+ NodeConfig = rabbit_ct_broker_helpers:get_node_config(Config, 2),
+ Nodename2 = ?config(nodename, NodeConfig),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Nodename2),
+ %% declare a quorum queue
+ QName = "shrink1",
+ #'queue.declare_ok'{} = declare_qq(Ch, QName),
+ {ok, Out1} = rabbitmq_queues(Config, 0, ["shrink", Nodename2]),
+ ?assertMatch(#{{"/", "shrink1"} := {2, ok}}, parse_result(Out1)),
+ Nodename1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ {ok, Out2} = rabbitmq_queues(Config, 0, ["shrink", Nodename1]),
+ ?assertMatch(#{{"/", "shrink1"} := {1, ok}}, parse_result(Out2)),
+ Nodename0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, Out3} = rabbitmq_queues(Config, 0, ["shrink", Nodename0]),
+ ?assertMatch(#{{"/", "shrink1"} := {1, error}}, parse_result(Out3)),
+ ok.
+
+grow(Config) ->
+ NodeConfig = rabbit_ct_broker_helpers:get_node_config(Config, 2),
+ Nodename2 = ?config(nodename, NodeConfig),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Nodename2),
+ %% declare a quorum queue
+ QName = "grow1",
+ Args = [{<<"x-quorum-initial-group-size">>, long, 1}],
+ #'queue.declare_ok'{} = declare_qq(Ch, QName, Args),
+ Nodename0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, Out1} = rabbitmq_queues(Config, 0, ["grow", Nodename0, "all"]),
+ ?assertMatch(#{{"/", "grow1"} := {2, ok}}, parse_result(Out1)),
+ Nodename1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ {ok, Out2} = rabbitmq_queues(Config, 0, ["grow", Nodename1, "all"]),
+ ?assertMatch(#{{"/", "grow1"} := {3, ok}}, parse_result(Out2)),
+
+ {ok, Out3} = rabbitmq_queues(Config, 0, ["grow", Nodename0, "all"]),
+ ?assertNotMatch(#{{"/", "grow1"} := _}, parse_result(Out3)),
+ ok.
+
+grow_invalid_node_filtered(Config) ->
+ NodeConfig = rabbit_ct_broker_helpers:get_node_config(Config, 2),
+ Nodename2 = ?config(nodename, NodeConfig),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Nodename2),
+ %% declare a quorum queue
+ QName = "grow-err",
+ Args = [{<<"x-quorum-initial-group-size">>, long, 1}],
+ #'queue.declare_ok'{} = declare_qq(Ch, QName, Args),
+ DummyNode = not_really_a_node@nothing,
+ {ok, Out1} = rabbitmq_queues(Config, 0, ["grow", DummyNode, "all"]),
+ ?assertNotMatch(#{{"/", "grow-err"} := _}, parse_result(Out1)),
+ ok.
+
+parse_result(S) ->
+ Lines = string:split(S, "\n", all),
+ maps:from_list(
+ [{{Vhost, QName},
+ {erlang:list_to_integer(Size), case Result of
+ "ok" -> ok;
+ _ -> error
+ end}}
+ || [Vhost, QName, Size, Result] <-
+ [string:split(L, "\t", all) || L <- Lines]]).
+
+declare_qq(Ch, Q, Args0) ->
+ Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}] ++ Args0,
+ amqp_channel:call(Ch, #'queue.declare'{queue = list_to_binary(Q),
+ durable = true,
+ auto_delete = false,
+ arguments = Args}).
+declare_qq(Ch, Q) ->
+ declare_qq(Ch, Q, []).
+
+rabbitmq_queues(Config, N, Args) ->
+ rabbit_ct_broker_helpers:rabbitmq_queues(Config, N, ["--silent" | Args]).
diff --git a/deps/rabbit/test/rabbitmqctl_integration_SUITE.erl b/deps/rabbit/test/rabbitmqctl_integration_SUITE.erl
new file mode 100644
index 0000000000..9c689f5667
--- /dev/null
+++ b/deps/rabbit/test/rabbitmqctl_integration_SUITE.erl
@@ -0,0 +1,164 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbitmqctl_integration_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([all/0
+ ,groups/0
+ ,init_per_suite/1
+ ,end_per_suite/1
+ ,init_per_group/2
+ ,end_per_group/2
+ ,init_per_testcase/2
+ ,end_per_testcase/2
+ ]).
+
+-export([list_queues_local/1
+ ,list_queues_offline/1
+ ,list_queues_online/1
+ ,list_queues_stopped/1
+ ]).
+
+all() ->
+ [
+ {group, list_queues}
+ ].
+
+groups() ->
+ [
+ {list_queues, [],
+ [list_queues_local
+ ,list_queues_online
+ ,list_queues_offline
+ ,list_queues_stopped
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(list_queues, Config0) ->
+ NumNodes = 3,
+ Config = create_n_node_cluster(Config0, NumNodes),
+ Config1 = declare_some_queues(Config),
+ rabbit_ct_broker_helpers:stop_node(Config1, NumNodes - 1),
+ Config1;
+init_per_group(_, Config) ->
+ Config.
+
+create_n_node_cluster(Config0, NumNodes) ->
+ Config1 = rabbit_ct_helpers:set_config(
+ Config0, [{rmq_nodes_count, NumNodes},
+ {rmq_nodes_clustered, true}]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+declare_some_queues(Config) ->
+ Nodes = rabbit_ct_helpers:get_config(Config, rmq_nodes),
+ PerNodeQueues = [ declare_some_queues(Config, NodeNum)
+ || NodeNum <- lists:seq(0, length(Nodes)-1) ],
+ rabbit_ct_helpers:set_config(Config, {per_node_queues, PerNodeQueues}).
+
+declare_some_queues(Config, NodeNum) ->
+ {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(Config, NodeNum),
+ NumQueues = 5,
+ Queues = [ list_to_binary(io_lib:format("queue-~b-on-node-~b", [QueueNum, NodeNum]))
+ || QueueNum <- lists:seq(1, NumQueues) ],
+ lists:foreach(fun (QueueName) ->
+ #'queue.declare_ok'{} = amqp_channel:call(Chan, #'queue.declare'{queue = QueueName, durable = true})
+ end, Queues),
+ rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan),
+ Queues.
+
+end_per_group(list_queues, Config0) ->
+ Config1 = case rabbit_ct_helpers:get_config(Config0, save_config) of
+ undefined -> Config0;
+ C -> C
+ end,
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(list_queues_stopped, Config0) ->
+    %% Start node 3 to crash its queues
+ rabbit_ct_broker_helpers:start_node(Config0, 2),
+ %% Make vhost "down" on nodes 2 and 3
+ rabbit_ct_broker_helpers:force_vhost_failure(Config0, 1, <<"/">>),
+ rabbit_ct_broker_helpers:force_vhost_failure(Config0, 2, <<"/">>),
+
+ rabbit_ct_broker_helpers:stop_node(Config0, 2),
+ rabbit_ct_helpers:testcase_started(Config0, list_queues_stopped);
+
+init_per_testcase(Testcase, Config0) ->
+ rabbit_ct_helpers:testcase_started(Config0, Testcase).
+
+end_per_testcase(Testcase, Config0) ->
+ rabbit_ct_helpers:testcase_finished(Config0, Testcase).
+
+%%----------------------------------------------------------------------------
+%% Test cases
+%%----------------------------------------------------------------------------
+list_queues_local(Config) ->
+ Node1Queues = lists:sort(lists:nth(1, ?config(per_node_queues, Config))),
+ Node2Queues = lists:sort(lists:nth(2, ?config(per_node_queues, Config))),
+ assert_ctl_queues(Config, 0, ["--local"], Node1Queues),
+ assert_ctl_queues(Config, 1, ["--local"], Node2Queues),
+ ok.
+
+list_queues_online(Config) ->
+ Node1Queues = lists:sort(lists:nth(1, ?config(per_node_queues, Config))),
+ Node2Queues = lists:sort(lists:nth(2, ?config(per_node_queues, Config))),
+ OnlineQueues = Node1Queues ++ Node2Queues,
+ assert_ctl_queues(Config, 0, ["--online"], OnlineQueues),
+ assert_ctl_queues(Config, 1, ["--online"], OnlineQueues),
+ ok.
+
+list_queues_offline(Config) ->
+ Node3Queues = lists:sort(lists:nth(3, ?config(per_node_queues, Config))),
+ OfflineQueues = Node3Queues,
+ assert_ctl_queues(Config, 0, ["--offline"], OfflineQueues),
+ assert_ctl_queues(Config, 1, ["--offline"], OfflineQueues),
+ ok.
+
+list_queues_stopped(Config) ->
+ Node1Queues = lists:sort(lists:nth(1, ?config(per_node_queues, Config))),
+ Node2Queues = lists:sort(lists:nth(2, ?config(per_node_queues, Config))),
+ Node3Queues = lists:sort(lists:nth(3, ?config(per_node_queues, Config))),
+
+ %% All queues are listed
+ ListedQueues =
+ [ {Name, State}
+ || [Name, State] <- rabbit_ct_broker_helpers:rabbitmqctl_list(
+ Config, 0, ["list_queues", "name", "state", "--no-table-headers"]) ],
+
+ [ <<"running">> = proplists:get_value(Q, ListedQueues) || Q <- Node1Queues ],
+ %% Node is running. Vhost is down
+ [ <<"stopped">> = proplists:get_value(Q, ListedQueues) || Q <- Node2Queues ],
+ %% Node is not running. Vhost is down
+ [ <<"down">> = proplists:get_value(Q, ListedQueues) || Q <- Node3Queues ].
+
+%%----------------------------------------------------------------------------
+%% Helpers
+%%----------------------------------------------------------------------------
+assert_ctl_queues(Config, Node, Args, Expected0) ->
+ Expected = lists:sort(Expected0),
+ Got0 = run_list_queues(Config, Node, Args),
+ Got = lists:sort(lists:map(fun hd/1, Got0)),
+ ?assertMatch(Expected, Got).
+
+run_list_queues(Config, Node, Args) ->
+ rabbit_ct_broker_helpers:rabbitmqctl_list(Config, Node, ["list_queues"] ++ Args ++ ["name", "--no-table-headers"]).
diff --git a/deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl b/deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl
new file mode 100644
index 0000000000..6365f91d47
--- /dev/null
+++ b/deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl
@@ -0,0 +1,110 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmqctl_shutdown_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, running_node},
+ {group, non_running_node}
+ ].
+
+groups() ->
+ [
+ {running_node, [], [
+ successful_shutdown
+ ]},
+ {non_running_node, [], [
+ nothing_to_shutdown
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(running_node, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {need_start, true}
+ ]);
+init_per_group(non_running_node, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]).
+
+end_per_group(running_node, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, []);
+end_per_group(non_running_node, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, []).
+
+init_per_testcase(Testcase, Config0) ->
+ Config1 = case ?config(need_start, Config0) of
+ true ->
+ rabbit_ct_helpers:run_setup_steps(Config0,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ [fun save_node/1]);
+ _ ->
+ rabbit_ct_helpers:set_config(Config0,
+ [{node, non_existent_node@localhost}])
+ end,
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config0) ->
+ Config1 = case ?config(need_start, Config0) of
+ true ->
+ rabbit_ct_helpers:run_teardown_steps(Config0,
+ rabbit_ct_broker_helpers:teardown_steps());
+ _ -> Config0
+ end,
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+save_node(Config) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_helpers:set_config(Config, [{node, Node}]).
+
+successful_shutdown(Config) ->
+ Node = ?config(node, Config),
+ Pid = node_pid(Node),
+ ok = shutdown_ok(Node),
+ false = erlang_pid_is_running(Pid),
+ false = node_is_running(Node).
+
+
+nothing_to_shutdown(Config) ->
+ Node = ?config(node, Config),
+
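+    %% Shutting down a node that is not running is expected to fail with
+    %% exit code 69 (EX_UNAVAILABLE in sysexits terms).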
+ { error, 69, _ } =
+ rabbit_ct_broker_helpers:control_action(shutdown, Node, []).
+
+node_pid(Node) ->
+ Val = rpc:call(Node, os, getpid, []),
+ true = is_list(Val),
+ list_to_integer(Val).
+
+erlang_pid_is_running(Pid) ->
+ rabbit_misc:is_os_process_alive(integer_to_list(Pid)).
+
+node_is_running(Node) ->
+ net_adm:ping(Node) == pong.
+
+shutdown_ok(Node) ->
+ %% Start a command
+ {stream, Stream} = rabbit_ct_broker_helpers:control_action(shutdown, Node, []),
+ %% Execute command steps. Each step will output a binary string
+ Lines = 'Elixir.Enum':to_list(Stream),
+ ct:pal("Command output ~p ~n", [Lines]),
+ [true = is_binary(Line) || Line <- Lines],
+ ok.
diff --git a/deps/rabbit/test/signal_handling_SUITE.erl b/deps/rabbit/test/signal_handling_SUITE.erl
new file mode 100644
index 0000000000..551f456039
--- /dev/null
+++ b/deps/rabbit/test/signal_handling_SUITE.erl
@@ -0,0 +1,160 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(signal_handling_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([suite/0,
+ all/0,
+ groups/0,
+ init_per_suite/1,
+ end_per_suite/1,
+ init_per_group/2,
+ end_per_group/2,
+ init_per_testcase/2,
+ end_per_testcase/2,
+
+ send_sighup/1,
+ send_sigterm/1,
+ send_sigtstp/1
+ ]).
+
+suite() ->
+ [{timetrap, {minutes, 5}}].
+
+all() ->
+ [
+ {group, signal_sent_to_pid_in_pidfile},
+ {group, signal_sent_to_pid_from_os_getpid}
+ ].
+
+groups() ->
+ Signals = [sighup,
+ sigterm,
+ sigtstp],
+ Tests = [list_to_existing_atom(rabbit_misc:format("send_~s", [Signal]))
+ || Signal <- Signals],
+ [
+ {signal_sent_to_pid_in_pidfile, [], Tests},
+ {signal_sent_to_pid_from_os_getpid, [], Tests}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ case os:type() of
+ {unix, _} ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config);
+ _ ->
+ {skip, "This testsuite is only relevant on Unix"}
+ end.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 1,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+send_sighup(Config) ->
+ {PidFile, Pid} = get_pidfile_and_pid(Config),
+
+ %% A SIGHUP signal should be ignored and the node should still be
+ %% running.
+ send_signal(Pid, "HUP"),
+ timer:sleep(10000),
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ?assert(rabbit_ct_broker_helpers:rpc(Config, A, rabbit, is_running, [])),
+ ?assert(filelib:is_regular(PidFile)).
+
+send_sigterm(Config) ->
+ {PidFile, Pid} = get_pidfile_and_pid(Config),
+
+ %% After sending a SIGTERM to the process, we expect the node to
+ %% exit.
+ send_signal(Pid, "TERM"),
+ wait_for_node_exit(Pid),
+
+ %% After a clean exit, the PID file should be removed.
+ ?assertNot(filelib:is_regular(PidFile)).
+
+send_sigtstp(Config) ->
+ {PidFile, Pid} = get_pidfile_and_pid(Config),
+
+    %% A SIGTSTP signal should be ignored and the node should still be
+    %% running.
+ send_signal(Pid, "TSTP"),
+ timer:sleep(10000),
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ?assert(rabbit_ct_broker_helpers:rpc(Config, A, rabbit, is_running, [])),
+ ?assert(filelib:is_regular(PidFile)).
+
+get_pidfile_and_pid(Config) ->
+ PidFile = rabbit_ct_broker_helpers:get_node_config(Config, 0, pid_file),
+ ?assert(filelib:is_regular(PidFile)),
+
+ %% We send the signal to either the process referenced in the
+    %% PID file or the Erlang VM process. Up to 3.8.x, they can be
+    %% different processes because the PID file may reference the
+    %% rabbitmq-server(8) script wrapper.
+ [{name, Group} | _] = ?config(tc_group_properties, Config),
+ Pid = case Group of
+ signal_sent_to_pid_in_pidfile ->
+ {ok, P} = file:read_file(PidFile),
+ string:trim(P, trailing, [$\r,$\n]);
+ signal_sent_to_pid_from_os_getpid ->
+ A = rabbit_ct_broker_helpers:get_node_config(
+ Config, 0, nodename),
+ rabbit_ct_broker_helpers:rpc(Config, A, os, getpid, [])
+ end,
+ {PidFile, Pid}.
+
+send_signal(Pid, Signal) ->
+ Cmd = ["kill",
+ "-" ++ Signal,
+ Pid],
+ ?assertMatch({ok, _}, rabbit_ct_helpers:exec(Cmd)).
+
+wait_for_node_exit(Pid) ->
+ case rabbit_misc:is_os_process_alive(Pid) of
+ true ->
+ timer:sleep(1000),
+ wait_for_node_exit(Pid);
+ false ->
+ ok
+ end.
diff --git a/deps/rabbit/test/simple_ha_SUITE.erl b/deps/rabbit/test/simple_ha_SUITE.erl
new file mode 100644
index 0000000000..8b2c1d6ebb
--- /dev/null
+++ b/deps/rabbit/test/simple_ha_SUITE.erl
@@ -0,0 +1,371 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(simple_ha_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(DELAY, 8000).
+
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+groups() ->
+ RejectTests = [
+ rejects_survive_stop,
+ rejects_survive_policy
+ ],
+ [
+ {cluster_size_2, [], [
+ rapid_redeclare,
+ declare_synchrony,
+ clean_up_exclusive_queues,
+ clean_up_and_redeclare_exclusive_queues_on_other_nodes
+ ]},
+ {cluster_size_3, [], [
+ consume_survives_stop,
+ consume_survives_policy,
+ auto_resume,
+ auto_resume_no_ccn_client,
+ confirms_survive_stop,
+ confirms_survive_policy,
+ {overflow_reject_publish, [], RejectTests},
+ {overflow_reject_publish_dlx, [], RejectTests}
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2}
+ ]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3}
+ ]);
+init_per_group(overflow_reject_publish, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish">>}
+ ]);
+init_per_group(overflow_reject_publish_dlx, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish-dlx">>}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+rapid_redeclare(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ Queue = <<"test">>,
+ [begin
+ amqp_channel:call(Ch, #'queue.declare'{queue = Queue,
+ durable = true}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = Queue})
+ end || _I <- lists:seq(1, 20)],
+ ok.
+
+%% Check that by the time we get a declare-ok back, the mirrors are up
+%% and in Mnesia.
+declare_synchrony(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ RabbitCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ HareCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ Q = <<"mirrored-queue">>,
+ declare(RabbitCh, Q),
+ amqp_channel:call(RabbitCh, #'confirm.select'{}),
+ amqp_channel:cast(RabbitCh, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
+ amqp_channel:wait_for_confirms(RabbitCh),
+ rabbit_ct_broker_helpers:kill_node(Config, Rabbit),
+
+ #'queue.declare_ok'{message_count = 1} = declare(HareCh, Q),
+ ok.
+
+declare(Ch, Name) ->
+ amqp_channel:call(Ch, #'queue.declare'{durable = true, queue = Name}).
+
+%% Ensure that exclusive queues are cleaned up when they are part of an HA
+%% cluster and their node is killed abruptly and then restarted
+clean_up_exclusive_queues(Config) ->
+ QName = <<"excl">>,
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<".*">>, <<"all">>),
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ amqp_channel:call(ChA, #'queue.declare'{queue = QName,
+ exclusive = true}),
+ ok = rabbit_ct_broker_helpers:kill_node(Config, A),
+ timer:sleep(?DELAY),
+ [] = rabbit_ct_broker_helpers:rpc(Config, B, rabbit_amqqueue, list, []),
+ ok = rabbit_ct_broker_helpers:start_node(Config, A),
+ timer:sleep(?DELAY),
+ [[],[]] = rabbit_ct_broker_helpers:rpc_all(Config, rabbit_amqqueue, list, []),
+ ok.
+
+clean_up_and_redeclare_exclusive_queues_on_other_nodes(Config) ->
+ QueueCount = 10,
+ QueueNames = lists:map(fun(N) ->
+ NBin = erlang:integer_to_binary(N),
+ <<"exclusive-q-", NBin/binary>>
+ end, lists:seq(1, QueueCount)),
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, A),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ LocationMinMasters = [
+ {<<"x-queue-master-locator">>, longstr, <<"min-masters">>}
+ ],
+ lists:foreach(fun(QueueName) ->
+ declare_exclusive(Ch, QueueName, LocationMinMasters),
+ subscribe(Ch, QueueName)
+ end, QueueNames),
+
+ ok = rabbit_ct_broker_helpers:kill_node(Config, B),
+
+ Cancels = receive_cancels([]),
+ ?assert(length(Cancels) > 0),
+
+    RemainingQueues = rabbit_ct_broker_helpers:rpc(Config, A, rabbit_amqqueue, list, []),
+
+    ?assertEqual(length(RemainingQueues), QueueCount - length(Cancels)),
+
+ lists:foreach(fun(QueueName) ->
+ declare_exclusive(Ch, QueueName, LocationMinMasters),
+ true = rabbit_ct_client_helpers:publish(Ch, QueueName, 1),
+ subscribe(Ch, QueueName)
+ end, QueueNames),
+ Messages = receive_messages([]),
+ ?assertEqual(10, length(Messages)),
+ ok = rabbit_ct_client_helpers:close_connection(Conn).
+
+
+consume_survives_stop(Cf) -> consume_survives(Cf, fun stop/2, true).
+consume_survives_sigkill(Cf) -> consume_survives(Cf, fun sigkill/2, true).
+consume_survives_policy(Cf) -> consume_survives(Cf, fun policy/2, true).
+auto_resume(Cf) -> consume_survives(Cf, fun sigkill/2, false).
+auto_resume_no_ccn_client(Cf) -> consume_survives(Cf, fun sigkill/2, false,
+ false).
+
+confirms_survive_stop(Cf) -> confirms_survive(Cf, fun stop/2).
+confirms_survive_policy(Cf) -> confirms_survive(Cf, fun policy/2).
+
+rejects_survive_stop(Cf) -> rejects_survive(Cf, fun stop/2).
+rejects_survive_policy(Cf) -> rejects_survive(Cf, fun policy/2).
+
+%%----------------------------------------------------------------------------
+
+consume_survives(Config, DeathFun, CancelOnFailover) ->
+ consume_survives(Config, DeathFun, CancelOnFailover, true).
+
+consume_survives(Config,
+ DeathFun, CancelOnFailover, CCNSupported) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ Channel1 = rabbit_ct_client_helpers:open_channel(Config, A),
+ Channel2 = rabbit_ct_client_helpers:open_channel(Config, B),
+ Channel3 = rabbit_ct_client_helpers:open_channel(Config, C),
+
+ %% declare the queue on the master, mirrored to the two mirrors
+ Queue = <<"test">>,
+ amqp_channel:call(Channel1, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% start up a consumer
+ ConsCh = case CCNSupported of
+ true -> Channel2;
+ false -> Port = rabbit_ct_broker_helpers:get_node_config(
+ Config, B, tcp_port_amqp),
+ open_incapable_channel(Port)
+ end,
+ ConsumerPid = rabbit_ha_test_consumer:create(
+ ConsCh, Queue, self(), CancelOnFailover, Msgs),
+
+ %% send a bunch of messages from the producer
+ ProducerPid = rabbit_ha_test_producer:create(Channel3, Queue,
+ self(), false, Msgs),
+ DeathFun(Config, A),
+    %% verify that the consumer got all messages, or die - the await_response
+    %% calls throw an exception if anything goes wrong.
+    ct:pal("awaiting producer ~w", [ProducerPid]),
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ ct:pal("awaiting consumer ~w", [ConsumerPid]),
+ rabbit_ha_test_consumer:await_response(ConsumerPid),
+ ok.
+
+confirms_survive(Config, DeathFun) ->
+ [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A),
+ Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B),
+
+ %% declare the queue on the master, mirrored to the two mirrors
+ Queue = <<"test">>,
+ amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue,
+ auto_delete = false,
+ durable = true}),
+
+ %% send one message to ensure the channel is flowing
+ amqp_channel:register_confirm_handler(Node1Channel, self()),
+ #'confirm.select_ok'{} = amqp_channel:call(Node1Channel, #'confirm.select'{}),
+
+ Payload = <<"initial message">>,
+ ok = amqp_channel:call(Node1Channel,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{payload = Payload}),
+
+ ok = receive
+ #'basic.ack'{multiple = false} -> ok;
+ #'basic.nack'{multiple = false} -> message_nacked
+ after
+ 5000 -> confirm_not_received
+ end,
+
+ %% send a bunch of messages from the producer
+ ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue,
+ self(), true, Msgs),
+ DeathFun(Config, A),
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ ok.
+
+rejects_survive(Config, DeathFun) ->
+ [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A),
+ Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B),
+
+ %% declare the queue on the master, mirrored to the two mirrors
+ XOverflow = ?config(overflow, Config),
+ Queue = <<"test_rejects", "_", XOverflow/binary>>,
+ amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue,
+ auto_delete = false,
+ durable = true,
+ arguments = [{<<"x-max-length">>, long, 1},
+ {<<"x-overflow">>, longstr, XOverflow}]}),
+
+ amqp_channel:register_confirm_handler(Node1Channel, self()),
+ #'confirm.select_ok'{} = amqp_channel:call(Node1Channel, #'confirm.select'{}),
+
+ Payload = <<"there can be only one">>,
+ ok = amqp_channel:call(Node1Channel,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{payload = Payload}),
+
+ ok = receive
+ #'basic.ack'{multiple = false} -> ok;
+ #'basic.nack'{multiple = false} -> message_nacked
+ after
+ 5000 -> confirm_not_received
+ end,
+
+ %% send a bunch of messages from the producer. They should all be nacked, as the queue is full.
+ ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue,
+ self(), true, Msgs, nacks),
+ DeathFun(Config, A),
+ rabbit_ha_test_producer:await_response(ProducerPid),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload}} =
+ amqp_channel:call(Node2Channel, #'basic.get'{queue = Queue}),
+ %% There is only one message.
+ #'basic.get_empty'{} = amqp_channel:call(Node2Channel, #'basic.get'{queue = Queue}),
+ ok.
+
+
+
+stop(Config, Node) ->
+ rabbit_ct_broker_helpers:stop_node_after(Config, Node, 50).
+
+sigkill(Config, Node) ->
+ rabbit_ct_broker_helpers:kill_node_after(Config, Node, 50).
+
+policy(Config, Node)->
+ Nodes = [
+ rabbit_misc:atom_to_binary(N)
+ || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ N =/= Node],
+ rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>,
+ {<<"nodes">>, Nodes}).
+
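+%% Open a channel on a connection that advertises an empty capabilities table,
+%% i.e. a client without consumer cancel notification support (used by the
+%% auto_resume_no_ccn_client case).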
+open_incapable_channel(NodePort) ->
+ Props = [{<<"capabilities">>, table, []}],
+ {ok, ConsConn} =
+ amqp_connection:start(#amqp_params_network{port = NodePort,
+ client_properties = Props}),
+ {ok, Ch} = amqp_connection:open_channel(ConsConn),
+ Ch.
+
+declare_exclusive(Ch, QueueName, Args) ->
+ Declare = #'queue.declare'{queue = QueueName,
+ exclusive = true,
+ arguments = Args
+ },
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, Declare).
+
+subscribe(Ch, QueueName) ->
+ ConsumeOk = amqp_channel:call(Ch, #'basic.consume'{queue = QueueName,
+ no_ack = true}),
+ #'basic.consume_ok'{} = ConsumeOk,
+ receive ConsumeOk -> ok after ?DELAY -> throw(consume_ok_timeout) end.
+
+receive_cancels(Cancels) ->
+ receive
+ #'basic.cancel'{} = C ->
+ receive_cancels([C|Cancels])
+ after ?DELAY ->
+ Cancels
+ end.
+
+receive_messages(All) ->
+ receive
+ {#'basic.deliver'{}, Msg} ->
+ receive_messages([Msg|All])
+ after ?DELAY ->
+ lists:reverse(All)
+ end.
diff --git a/deps/rabbit/test/single_active_consumer_SUITE.erl b/deps/rabbit/test/single_active_consumer_SUITE.erl
new file mode 100644
index 0000000000..59f2b6e83d
--- /dev/null
+++ b/deps/rabbit/test/single_active_consumer_SUITE.erl
@@ -0,0 +1,376 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(single_active_consumer_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, classic_queue}, {group, quorum_queue}
+ ].
+
+groups() ->
+ [
+ {classic_queue, [], [
+ all_messages_go_to_one_consumer,
+ fallback_to_another_consumer_when_first_one_is_cancelled,
+ fallback_to_another_consumer_when_exclusive_consumer_channel_is_cancelled,
+ fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks,
+ amqp_exclusive_consume_fails_on_exclusive_consumer_queue
+ ]},
+ {quorum_queue, [], [
+ all_messages_go_to_one_consumer,
+ fallback_to_another_consumer_when_first_one_is_cancelled,
+ fallback_to_another_consumer_when_exclusive_consumer_channel_is_cancelled,
+ fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks,
+ basic_get_is_unsupported
+ %% amqp_exclusive_consume_fails_on_exclusive_consumer_queue % Exclusive consume not implemented in QQ
+ ]}
+ ].
+
+init_per_suite(Config0) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config0, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config1, {rabbit, [{quorum_tick_interval, 1000}]}),
+ rabbit_ct_helpers:run_setup_steps(Config,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(classic_queue, Config) ->
+ [{single_active_consumer_queue_declare,
+ #'queue.declare'{arguments = [
+ {<<"x-single-active-consumer">>, bool, true},
+ {<<"x-queue-type">>, longstr, <<"classic">>}
+ ],
+ auto_delete = true}
+ } | Config];
+init_per_group(quorum_queue, Config) ->
+ Ret = rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_feature_flags, enable, [quorum_queue]),
+ case Ret of
+ ok ->
+ [{single_active_consumer_queue_declare,
+ #'queue.declare'{
+ arguments = [
+ {<<"x-single-active-consumer">>, bool, true},
+ {<<"x-queue-type">>, longstr, <<"quorum">>}
+ ],
+ durable = true, exclusive = false, auto_delete = false}
+ } | Config];
+ Error ->
+ {skip, {"Quorum queues are unsupported", Error}}
+ end.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config0) ->
+ Config = [{queue_name, atom_to_binary(Testcase, utf8)} | Config0],
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+all_messages_go_to_one_consumer(Config) ->
+ {C, Ch} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+ MessageCount = 5,
+ ConsumerPid = spawn(?MODULE, consume, [{self(), {maps:new(), 0}, MessageCount}]),
+ #'basic.consume_ok'{consumer_tag = CTag1} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, ConsumerPid),
+ #'basic.consume_ok'{consumer_tag = CTag2} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, ConsumerPid),
+
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}) || _X <- lists:seq(1, MessageCount)],
+
+ receive
+ {consumer_done, {MessagesPerConsumer, MessageCount}} ->
+ ?assertEqual(MessageCount, MessageCount),
+ ?assertEqual(2, maps:size(MessagesPerConsumer)),
+ ?assertEqual(MessageCount, maps:get(CTag1, MessagesPerConsumer)),
+ ?assertEqual(0, maps:get(CTag2, MessagesPerConsumer))
+ after ?TIMEOUT ->
+ flush(),
+ throw(failed)
+ end,
+
+ amqp_connection:close(C),
+ ok.
+
+fallback_to_another_consumer_when_first_one_is_cancelled(Config) ->
+ {C, Ch} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+ MessageCount = 10,
+ ConsumerPid = spawn(?MODULE, consume, [{self(), {maps:new(), 0}, MessageCount}]),
+ #'basic.consume_ok'{consumer_tag = CTag1} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, ConsumerPid),
+ #'basic.consume_ok'{consumer_tag = CTag2} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, ConsumerPid),
+ #'basic.consume_ok'{consumer_tag = CTag3} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, ConsumerPid),
+
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}) || _X <- lists:seq(1, MessageCount div 2)],
+
+ {ok, {MessagesPerConsumer1, _}} = wait_for_messages(MessageCount div 2),
+ FirstActiveConsumerInList = maps:keys(maps:filter(fun(_CTag, Count) -> Count > 0 end, MessagesPerConsumer1)),
+ ?assertEqual(1, length(FirstActiveConsumerInList)),
+
+ FirstActiveConsumer = lists:nth(1, FirstActiveConsumerInList),
+ #'basic.cancel_ok'{} = amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = FirstActiveConsumer}),
+
+ {cancel_ok, FirstActiveConsumer} = wait_for_cancel_ok(),
+
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}) || _X <- lists:seq(MessageCount div 2 + 1, MessageCount - 1)],
+
+ {ok, {MessagesPerConsumer2, _}} = wait_for_messages(MessageCount div 2 - 1),
+ SecondActiveConsumerInList = maps:keys(maps:filter(
+ fun(CTag, Count) -> Count > 0 andalso CTag /= FirstActiveConsumer end,
+ MessagesPerConsumer2)
+ ),
+ ?assertEqual(1, length(SecondActiveConsumerInList)),
+ SecondActiveConsumer = lists:nth(1, SecondActiveConsumerInList),
+
+ #'basic.cancel_ok'{} = amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = SecondActiveConsumer}),
+
+ amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}),
+ ?assertMatch({ok, _}, wait_for_messages(1)),
+
+ LastActiveConsumer = lists:nth(1, lists:delete(FirstActiveConsumer, lists:delete(SecondActiveConsumer, [CTag1, CTag2, CTag3]))),
+
+ receive
+ {consumer_done, {MessagesPerConsumer, MessageCount}} ->
+ ?assertEqual(MessageCount, MessageCount),
+ ?assertEqual(3, maps:size(MessagesPerConsumer)),
+ ?assertEqual(MessageCount div 2, maps:get(FirstActiveConsumer, MessagesPerConsumer)),
+ ?assertEqual(MessageCount div 2 - 1, maps:get(SecondActiveConsumer, MessagesPerConsumer)),
+ ?assertEqual(1, maps:get(LastActiveConsumer, MessagesPerConsumer))
+ after ?TIMEOUT ->
+ flush(),
+ throw(failed)
+ end,
+
+ amqp_connection:close(C),
+ ok.
+
+fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks(Config) ->
+ %% Let's ensure that although the consumer is cancelled we still keep the unacked
+ %% messages and accept acknowledgments on them.
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ {C, Ch} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = false}, self()),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = false}, self()),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = false}, self()),
+ Consumers0 = rpc:call(Server, rabbit_amqqueue, consumers_all, [<<"/">>]),
+ ?assertMatch([_, _, _], lists:filter(fun(Props) ->
+ Resource = proplists:get_value(queue_name, Props),
+ Q == Resource#resource.name
+ end, Consumers0)),
+
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = P}) || P <- [<<"msg1">>, <<"msg2">>]],
+
+ {CTag, DTag1} = receive_deliver(),
+ {_CTag, DTag2} = receive_deliver(),
+
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]),
+ #'basic.cancel_ok'{} = amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}),
+
+ receive
+ #'basic.cancel_ok'{consumer_tag = CTag} ->
+ ok
+ end,
+ Consumers1 = rpc:call(Server, rabbit_amqqueue, consumers_all, [<<"/">>]),
+ ?assertMatch([_, _], lists:filter(fun(Props) ->
+ Resource = proplists:get_value(queue_name, Props),
+ Q == Resource#resource.name
+ end, Consumers1)),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]),
+
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = P}) || P <- [<<"msg3">>, <<"msg4">>]],
+
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"4">>, <<"0">>, <<"4">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag1}),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag2}),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]),
+
+ amqp_connection:close(C),
+ ok.
+
+fallback_to_another_consumer_when_exclusive_consumer_channel_is_cancelled(Config) ->
+ {C, Ch} = connection_and_channel(Config),
+ {C1, Ch1} = connection_and_channel(Config),
+ {C2, Ch2} = connection_and_channel(Config),
+ {C3, Ch3} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+ MessageCount = 10,
+ Consumer1Pid = spawn(?MODULE, consume, [{self(), {maps:new(), 0}, MessageCount div 2}]),
+ Consumer2Pid = spawn(?MODULE, consume, [{self(), {maps:new(), 0}, MessageCount div 2 - 1}]),
+ Consumer3Pid = spawn(?MODULE, consume, [{self(), {maps:new(), 0}, MessageCount div 2 - 1}]),
+ #'basic.consume_ok'{consumer_tag = CTag1} =
+ amqp_channel:subscribe(Ch1, #'basic.consume'{queue = Q, no_ack = true, consumer_tag = <<"1">>}, Consumer1Pid),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch2, #'basic.consume'{queue = Q, no_ack = true, consumer_tag = <<"2">>}, Consumer2Pid),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch3, #'basic.consume'{queue = Q, no_ack = true, consumer_tag = <<"3">>}, Consumer3Pid),
+
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}) || _X <- lists:seq(1, MessageCount div 2)],
+
+ {MessagesPerConsumer1, MessageCount1} = consume_results(),
+ ?assertEqual(MessageCount div 2, MessageCount1),
+ ?assertEqual(1, maps:size(MessagesPerConsumer1)),
+ ?assertEqual(MessageCount div 2, maps:get(CTag1, MessagesPerConsumer1)),
+
+ ok = amqp_channel:close(Ch1),
+
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}) || _X <- lists:seq(MessageCount div 2 + 1, MessageCount - 1)],
+
+ {MessagesPerConsumer2, MessageCount2} = consume_results(),
+ ?assertEqual(MessageCount div 2 - 1, MessageCount2),
+ ?assertEqual(1, maps:size(MessagesPerConsumer2)),
+
+ ok = amqp_channel:close(Ch2),
+
+ amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"poison">>}),
+
+ {MessagesPerConsumer3, MessageCount3} = consume_results(),
+ ?assertEqual(1, MessageCount3),
+ ?assertEqual(1, maps:size(MessagesPerConsumer3)),
+
+ [amqp_connection:close(Conn) || Conn <- [C1, C2, C3, C]],
+ ok.
+
+basic_get_is_unsupported(Config) ->
+ {C, Ch} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 405, _}}, _},
+ amqp_channel:call(Ch, #'basic.get'{queue = Q, no_ack = false})),
+
+ amqp_connection:close(C),
+ ok.
+
+amqp_exclusive_consume_fails_on_exclusive_consumer_queue(Config) ->
+ {C, Ch} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 403, _}}, _},
+ amqp_channel:call(Ch, #'basic.consume'{queue = Q, exclusive = true})
+ ),
+ amqp_connection:close(C),
+ ok.
+
+connection_and_channel(Config) ->
+ C = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Ch} = amqp_connection:open_channel(C),
+ {C, Ch}.
+
+queue_declare(Channel, Config) ->
+ QueueName = ?config(queue_name, Config),
+ Declare0 = ?config(single_active_consumer_queue_declare, Config),
+ Declare = Declare0#'queue.declare'{queue = QueueName},
+ #'queue.declare_ok'{queue = Q} = amqp_channel:call(Channel, Declare),
+ Q.
+
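+%% Test consumer loop: counts deliveries per consumer tag, reports progress to
+%% the parent test process after every delivery, and sends a final
+%% {consumer_done, ...} summary once the expected number of messages (or a
+%% <<"poison">> payload) has been received.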
+consume({Parent, State, 0}) ->
+ Parent ! {consumer_done, State};
+consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown}) ->
+ receive
+ #'basic.consume_ok'{consumer_tag = CTag} ->
+ consume({Parent, {maps:put(CTag, 0, MessagesPerConsumer), MessageCount}, CountDown});
+ {#'basic.deliver'{consumer_tag = CTag}, #amqp_msg{payload = <<"poison">>}} ->
+ Parent ! {consumer_done,
+ {maps:update_with(CTag, fun(V) -> V + 1 end, MessagesPerConsumer),
+ MessageCount + 1}};
+ {#'basic.deliver'{consumer_tag = CTag}, _Content} ->
+ NewState = {maps:update_with(CTag, fun(V) -> V + 1 end, MessagesPerConsumer),
+ MessageCount + 1},
+ Parent ! {message, NewState},
+ consume({Parent, NewState, CountDown - 1});
+ #'basic.cancel_ok'{consumer_tag = CTag} ->
+ Parent ! {cancel_ok, CTag},
+ consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown});
+ _ ->
+ consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown})
+ after ?TIMEOUT ->
+ Parent ! {consumer_timeout, {MessagesPerConsumer, MessageCount}},
+ flush(),
+ exit(consumer_timeout)
+ end.
+
+consume_results() ->
+ receive
+ {consumer_done, {MessagesPerConsumer, MessageCount}} ->
+ {MessagesPerConsumer, MessageCount};
+ {consumer_timeout, {MessagesPerConsumer, MessageCount}} ->
+ {MessagesPerConsumer, MessageCount};
+ _ ->
+ consume_results()
+ after ?TIMEOUT ->
+ flush(),
+ throw(failed)
+ end.
+
+wait_for_messages(ExpectedCount) ->
+ wait_for_messages(ExpectedCount, {}).
+
+wait_for_messages(0, State) ->
+ {ok, State};
+wait_for_messages(ExpectedCount, State) ->
+ receive
+ {message, {MessagesPerConsumer, MessageCount}} ->
+ wait_for_messages(ExpectedCount - 1, {MessagesPerConsumer, MessageCount})
+ after 5000 ->
+ {missing, ExpectedCount, State}
+ end.
+
+wait_for_cancel_ok() ->
+ receive
+ {cancel_ok, CTag} ->
+ {cancel_ok, CTag}
+ after 5000 ->
+ throw(consumer_cancel_ok_timeout)
+ end.
+
+receive_deliver() ->
+ receive
+ {#'basic.deliver'{consumer_tag = CTag,
+ delivery_tag = DTag}, _} ->
+ {CTag, DTag}
+ after 5000 ->
+ exit(deliver_timeout)
+ end.
+
+flush() ->
+ receive
+ Msg ->
+ ct:pal("flushed: ~w~n", [Msg]),
+ flush()
+ after 10 ->
+ ok
+ end.
diff --git a/deps/rabbit/test/sync_detection_SUITE.erl b/deps/rabbit/test/sync_detection_SUITE.erl
new file mode 100644
index 0000000000..55a86b7b3d
--- /dev/null
+++ b/deps/rabbit/test/sync_detection_SUITE.erl
@@ -0,0 +1,243 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(sync_detection_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(LOOP_RECURSION_DELAY, 100).
+
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ follower_synchronization
+ ]},
+ {cluster_size_3, [], [
+ follower_synchronization_ttl
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1,
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+follower_synchronization(Config) ->
+ [Master, Slave] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Channel = rabbit_ct_client_helpers:open_channel(Config, Master),
+ Queue = <<"ha.two.test">>,
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% The comments on the right are the queue length and the pending acks on
+ %% the master.
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+
+    %% We get and ack one message when the mirror is down, and check that when we
+    %% start the mirror it's not marked as synced until we ack the message. We also
+    %% publish another message when the mirror is up.
+ send_dummy_message(Channel, Queue), % 1 - 0
+ {#'basic.get_ok'{delivery_tag = Tag1}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1
+
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+
+ follower_unsynced(Master, Queue),
+ send_dummy_message(Channel, Queue), % 1 - 1
+ follower_unsynced(Master, Queue),
+
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag1}), % 1 - 0
+
+ follower_synced(Master, Queue),
+
+ %% We restart the mirror and we send a message, so that the mirror will only
+ %% have one of the messages.
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+
+ send_dummy_message(Channel, Queue), % 2 - 0
+
+ follower_unsynced(Master, Queue),
+
+ %% We reject the message that the mirror doesn't have, and verify that it's
+ %% still unsynced
+ {#'basic.get_ok'{delivery_tag = Tag2}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1
+ follower_unsynced(Master, Queue),
+ amqp_channel:cast(Channel, #'basic.reject'{ delivery_tag = Tag2,
+ requeue = true }), % 2 - 0
+ follower_unsynced(Master, Queue),
+ {#'basic.get_ok'{delivery_tag = Tag3}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag3}), % 1 - 0
+ follower_synced(Master, Queue),
+ {#'basic.get_ok'{delivery_tag = Tag4}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag4}), % 0 - 0
+ follower_synced(Master, Queue).
+
+follower_synchronization_ttl(Config) ->
+ [Master, Slave, DLX] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Channel = rabbit_ct_client_helpers:open_channel(Config, Master),
+ DLXChannel = rabbit_ct_client_helpers:open_channel(Config, DLX),
+
+ %% We declare a DLX queue to wait for messages to be TTL'ed
+ DLXQueue = <<"dlx-queue">>,
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = DLXQueue,
+ auto_delete = false}),
+
+ TestMsgTTL = 5000,
+ Queue = <<"ha.two.test">>,
+ %% Sadly we need fairly high numbers for the TTL because starting/stopping
+ %% nodes takes a fair amount of time.
+ Args = [{<<"x-message-ttl">>, long, TestMsgTTL},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, DLXQueue}],
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = Queue,
+ auto_delete = false,
+ arguments = Args}),
+
+ follower_synced(Master, Queue),
+
+ %% All unknown
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+ send_dummy_message(Channel, Queue),
+ send_dummy_message(Channel, Queue),
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+ follower_unsynced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ follower_synced(Master, Queue),
+
+ %% 1 unknown, 1 known
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+ send_dummy_message(Channel, Queue),
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+ follower_unsynced(Master, Queue),
+ send_dummy_message(Channel, Queue),
+ follower_unsynced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ follower_synced(Master, Queue),
+
+    %% Both known
+ send_dummy_message(Channel, Queue),
+ send_dummy_message(Channel, Queue),
+ follower_synced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ follower_synced(Master, Queue),
+
+ ok.
+
+send_dummy_message(Channel, Queue) ->
+ Payload = <<"foo">>,
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Queue},
+ amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}).
+
+follower_pids(Node, Queue) ->
+ {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+ [rabbit_misc:r(<<"/">>, queue, Queue)]),
+ SSP = synchronised_slave_pids,
+ [{SSP, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [SSP]]),
+ case Pids of
+ '' -> [];
+ _ -> Pids
+ end.
+
+%% The mnesia synchronization takes a while. Rather than relying on the
+%% (quite high) timetrap to fail the test, poll the sync status with a
+%% bounded number of retries.
+wait_for_sync_status(Status, Node, Queue) ->
+ Max = 90000 / ?LOOP_RECURSION_DELAY,
+ wait_for_sync_status(0, Max, Status, Node, Queue).
+
+wait_for_sync_status(N, Max, Status, Node, Queue) when N >= Max ->
+ erlang:error({sync_status_max_tries_failed,
+ [{queue, Queue},
+ {node, Node},
+ {expected_status, Status},
+ {max_tried, Max}]});
+wait_for_sync_status(N, Max, Status, Node, Queue) ->
+ Synced = length(follower_pids(Node, Queue)) =:= 1,
+ case Synced =:= Status of
+ true -> ok;
+ false -> timer:sleep(?LOOP_RECURSION_DELAY),
+ wait_for_sync_status(N + 1, Max, Status, Node, Queue)
+ end.
+
+follower_synced(Node, Queue) ->
+ wait_for_sync_status(true, Node, Queue).
+
+follower_unsynced(Node, Queue) ->
+ wait_for_sync_status(false, Node, Queue).
+
+wait_for_messages(Queue, Channel, N) ->
+ Sub = #'basic.consume'{queue = Queue},
+ #'basic.consume_ok'{consumer_tag = CTag} = amqp_channel:call(Channel, Sub),
+ receive
+ #'basic.consume_ok'{} -> ok
+ end,
+ lists:foreach(
+ fun (_) -> receive
+ {#'basic.deliver'{delivery_tag = Tag}, _Content} ->
+ amqp_channel:cast(Channel,
+ #'basic.ack'{delivery_tag = Tag})
+ end
+ end, lists:seq(1, N)),
+ amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = CTag}).
diff --git a/deps/rabbit/test/temp/head_message_timestamp_tests.py b/deps/rabbit/test/temp/head_message_timestamp_tests.py
new file mode 100755
index 0000000000..6698b88b7b
--- /dev/null
+++ b/deps/rabbit/test/temp/head_message_timestamp_tests.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+#
+# Tests for the SLA patch which adds the head_message_timestamp queue stat.
+# Uses both the management interface via rabbitmqadmin and the AMQP interface via Pika.
+# There's no particular reason to have used rabbitmqadmin other than saving some bulk.
+# Similarly, the separate declaration of exchanges and queues is just a preference
+# following a typical enterprise policy where admin users create these resources.
+
+from datetime import datetime
+import json
+import pika
+import os
+import sys
+from time import clock, mktime, sleep
+import unittest
+
+# Uses the rabbitmqadmin script.
+# To be imported this must be given a .py suffix and placed on the Python path
+from rabbitmqadmin import *
+
+TEXCH = 'head-message-timestamp-test'
+TQUEUE = 'head-message-timestamp-test-queue'
+
+TIMEOUT_SECS = 10
+
+TIMESTAMP1 = mktime(datetime(2010,1,1,12,00,01).timetuple())
+TIMESTAMP2 = mktime(datetime(2010,1,1,12,00,02).timetuple())
+
+AMQP_PORT = 99
+
+DELIVERY_MODE = 2
+DURABLE = False
+
+def log(msg):
+ print("\nINFO: " + msg)
+
+class RabbitTestCase(unittest.TestCase):
+ def setUp(self):
+ parser.set_conflict_handler('resolve')
+ (options, args) = make_configuration()
+ AMQP_PORT = int(options.port) - 10000
+
+ self.mgmt = Management(options, args)
+ self.mgmt.put('/exchanges/%2f/' + TEXCH, '{"type" : "fanout", "durable":' + str(DURABLE).lower() + '}')
+ self.mgmt.put('/queues/%2f/' + TQUEUE, '{"auto_delete":false,"durable":' + str(DURABLE).lower() + ',"arguments":[]}')
+ self.mgmt.post('/bindings/%2f/e/' + TEXCH + '/q/' + TQUEUE, '{"routing_key": ".*", "arguments":[]}')
+ self.credentials = pika.PlainCredentials(options.username, options.password)
+ parameters = pika.ConnectionParameters(options.hostname, port=AMQP_PORT, credentials=self.credentials)
+ self.connection = pika.BlockingConnection(parameters)
+ self.channel = self.connection.channel()
+
+ def tearDown(self):
+ parser.set_conflict_handler('resolve')
+ (options, args) = make_configuration()
+ self.mgmt = Management(options, args)
+ self.mgmt.delete('/queues/%2f/' + TQUEUE)
+ self.mgmt.delete('/exchanges/%2f/' + TEXCH)
+
+class RabbitSlaTestCase(RabbitTestCase):
+ def get_queue_stats(self, queue_name):
+ stats_str = self.mgmt.get('/queues/%2f/' + queue_name)
+ return json.loads(stats_str)
+
+ def get_head_message_timestamp(self, queue_name):
+ return self.get_queue_stats(queue_name)["head_message_timestamp"]
+
+ def send(self, message, timestamp=None):
+ self.channel.basic_publish(TEXCH, '', message,
+ pika.BasicProperties(content_type='text/plain',
+ delivery_mode=DELIVERY_MODE,
+ timestamp=timestamp))
+ log("Sent message with body: " + str(message))
+
+ def receive(self, queue):
+ method_frame, header_frame, body = self.channel.basic_get(queue = queue)
+ log("Received message with body: " + str(body))
+ return method_frame.delivery_tag, body
+
+ def ack(self, delivery_tag):
+ self.channel.basic_ack(delivery_tag)
+
+ def nack(self, delivery_tag):
+ self.channel.basic_nack(delivery_tag)
+
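+    # Poll the head_message_timestamp queue stat via the management API until
+    # it differs from old_timestamp or TIMEOUT_SECS elapses, then return the
+    # current value.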
+ def wait_for_new_timestamp(self, queue, old_timestamp):
+ stats_wait_start = clock()
+ while ((clock() - stats_wait_start) < TIMEOUT_SECS and
+ self.get_head_message_timestamp(queue) == old_timestamp):
+ sleep(0.1)
+ log('Queue stats updated in ' + str(clock() - stats_wait_start) + ' secs.')
+ return self.get_head_message_timestamp(queue)
+
+ # TESTS
+
+ def test_no_timestamp_when_queue_is_empty(self):
+ assert self.get_head_message_timestamp(TQUEUE) == ''
+
+ def test_has_timestamp_when_first_msg_is_added(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ assert stats_timestamp == TIMESTAMP1
+
+ def test_no_timestamp_when_last_msg_is_removed(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ tag, body = self.receive(TQUEUE)
+ self.ack(tag)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, TIMESTAMP1)
+ assert stats_timestamp == ''
+
+ def test_timestamp_updated_when_msg_is_removed(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ self.send('Msg2', TIMESTAMP2)
+ tag, body = self.receive(TQUEUE)
+ self.ack(tag)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, TIMESTAMP1)
+ assert stats_timestamp == TIMESTAMP2
+
+ def test_timestamp_not_updated_before_msg_is_acked(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ tag, body = self.receive(TQUEUE)
+ sleep(1) # Allow time for update to appear if it was going to (it shouldn't)
+ assert self.get_head_message_timestamp(TQUEUE) == TIMESTAMP1
+ self.ack(tag)
+
+if __name__ == '__main__':
+ unittest.main(verbosity = 2)
+
+
diff --git a/deps/rabbit/test/temp/rabbitmqadmin.py b/deps/rabbit/test/temp/rabbitmqadmin.py
new file mode 100755
index 0000000000..cdddd56497
--- /dev/null
+++ b/deps/rabbit/test/temp/rabbitmqadmin.py
@@ -0,0 +1,934 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+# Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved
+
+import sys
+if sys.version_info[0] < 2 or sys.version_info[1] < 6:
+ print "Sorry, rabbitmqadmin requires at least Python 2.6."
+ sys.exit(1)
+
+from ConfigParser import ConfigParser, NoSectionError
+from optparse import OptionParser, TitledHelpFormatter
+import httplib
+import urllib
+import urlparse
+import base64
+import json
+import os
+import socket
+
+VERSION = '0.0.0'
+
+LISTABLE = {'connections': {'vhost': False},
+ 'channels': {'vhost': False},
+ 'consumers': {'vhost': True},
+ 'exchanges': {'vhost': True},
+ 'queues': {'vhost': True},
+ 'bindings': {'vhost': True},
+ 'users': {'vhost': False},
+ 'vhosts': {'vhost': False},
+ 'permissions': {'vhost': False},
+ 'nodes': {'vhost': False},
+ 'parameters': {'vhost': False,
+ 'json': ['value']},
+ 'policies': {'vhost': False,
+ 'json': ['definition']}}
+
+SHOWABLE = {'overview': {'vhost': False}}
+
+PROMOTE_COLUMNS = ['vhost', 'name', 'type',
+ 'source', 'destination', 'destination_type', 'routing_key']
+
+URIS = {
+ 'exchange': '/exchanges/{vhost}/{name}',
+ 'queue': '/queues/{vhost}/{name}',
+ 'binding': '/bindings/{vhost}/e/{source}/{destination_char}/{destination}',
+ 'binding_del':'/bindings/{vhost}/e/{source}/{destination_char}/{destination}/{properties_key}',
+ 'vhost': '/vhosts/{name}',
+ 'user': '/users/{name}',
+ 'permission': '/permissions/{vhost}/{user}',
+ 'parameter': '/parameters/{component}/{vhost}/{name}',
+ 'policy': '/policies/{vhost}/{name}'
+ }
+
+DECLARABLE = {
+ 'exchange': {'mandatory': ['name', 'type'],
+ 'json': ['arguments'],
+ 'optional': {'auto_delete': 'false', 'durable': 'true',
+ 'internal': 'false', 'arguments': {}}},
+ 'queue': {'mandatory': ['name'],
+ 'json': ['arguments'],
+ 'optional': {'auto_delete': 'false', 'durable': 'true',
+ 'arguments': {}, 'node': None}},
+ 'binding': {'mandatory': ['source', 'destination'],
+ 'json': ['arguments'],
+ 'optional': {'destination_type': 'queue',
+ 'routing_key': '', 'arguments': {}}},
+ 'vhost': {'mandatory': ['name'],
+ 'optional': {'tracing': None}},
+ 'user': {'mandatory': ['name', 'password', 'tags'],
+ 'optional': {}},
+ 'permission': {'mandatory': ['vhost', 'user', 'configure', 'write', 'read'],
+ 'optional': {}},
+ 'parameter': {'mandatory': ['component', 'name', 'value'],
+ 'json': ['value'],
+ 'optional': {}},
+ # Priority is 'json' to convert to int
+ 'policy': {'mandatory': ['name', 'pattern', 'definition'],
+ 'json': ['definition', 'priority'],
+ 'optional': {'priority' : 0, 'apply-to': None}}
+ }
+
+DELETABLE = {
+ 'exchange': {'mandatory': ['name']},
+ 'queue': {'mandatory': ['name']},
+ 'binding': {'mandatory': ['source', 'destination_type', 'destination',
+ 'properties_key']},
+ 'vhost': {'mandatory': ['name']},
+ 'user': {'mandatory': ['name']},
+ 'permission': {'mandatory': ['vhost', 'user']},
+ 'parameter': {'mandatory': ['component', 'name']},
+ 'policy': {'mandatory': ['name']}
+ }
+
+CLOSABLE = {
+ 'connection': {'mandatory': ['name'],
+ 'optional': {},
+ 'uri': '/connections/{name}'}
+ }
+
+PURGABLE = {
+ 'queue': {'mandatory': ['name'],
+ 'optional': {},
+ 'uri': '/queues/{vhost}/{name}/contents'}
+ }
+
+EXTRA_VERBS = {
+ 'publish': {'mandatory': ['routing_key'],
+ 'optional': {'payload': None,
+ 'exchange': 'amq.default',
+ 'payload_encoding': 'string'},
+ 'uri': '/exchanges/{vhost}/{exchange}/publish'},
+ 'get': {'mandatory': ['queue'],
+ 'optional': {'count': '1', 'requeue': 'true',
+ 'payload_file': None, 'encoding': 'auto'},
+ 'uri': '/queues/{vhost}/{queue}/get'}
+}
+
+for k in DECLARABLE:
+ DECLARABLE[k]['uri'] = URIS[k]
+
+for k in DELETABLE:
+ DELETABLE[k]['uri'] = URIS[k]
+ DELETABLE[k]['optional'] = {}
+DELETABLE['binding']['uri'] = URIS['binding_del']
+
+def short_usage():
+ return "rabbitmqadmin [options] subcommand"
+
+def title(name):
+ return "\n%s\n%s\n\n" % (name, '=' * len(name))
+
+def subcommands_usage():
+ usage = """Usage
+=====
+ """ + short_usage() + """
+
+ where subcommand is one of:
+""" + title("Display")
+
+ for l in LISTABLE:
+ usage += " list {0} [<column>...]\n".format(l)
+ for s in SHOWABLE:
+ usage += " show {0} [<column>...]\n".format(s)
+ usage += title("Object Manipulation")
+ usage += fmt_usage_stanza(DECLARABLE, 'declare')
+ usage += fmt_usage_stanza(DELETABLE, 'delete')
+ usage += fmt_usage_stanza(CLOSABLE, 'close')
+ usage += fmt_usage_stanza(PURGABLE, 'purge')
+ usage += title("Broker Definitions")
+ usage += """ export <file>
+ import <file>
+"""
+ usage += title("Publishing and Consuming")
+ usage += fmt_usage_stanza(EXTRA_VERBS, '')
+ usage += """
+ * If payload is not specified on publish, standard input is used
+
+ * If payload_file is not specified on get, the payload will be shown on
+ standard output along with the message metadata
+
+ * If payload_file is specified on get, count must not be set
+"""
+ return usage
+
+def config_usage():
+ usage = "Usage\n=====\n" + short_usage()
+ usage += "\n" + title("Configuration File")
+ usage += """ It is possible to specify a configuration file from the command line.
+ Hosts can be configured easily in a configuration file and called
+ from the command line.
+"""
+ usage += title("Example")
+ usage += """ # rabbitmqadmin.conf.example START
+
+ [host_normal]
+ hostname = localhost
+ port = 15672
+ username = guest
+ password = guest
+ # used as the default vhost for declare / delete only
+ declare_vhost = /
+ # used as the default vhost for declare / delete / list
+ vhost = /
+
+ [host_ssl]
+ hostname = otherhost
+ port = 15672
+ username = guest
+ password = guest
+ ssl = True
+ ssl_key_file = /path/to/key.pem
+ ssl_cert_file = /path/to/cert.pem
+
+ # rabbitmqadmin.conf.example END
+"""
+ usage += title("Use")
+ usage += """ rabbitmqadmin -c rabbitmqadmin.conf.example -N host_normal ..."""
+ return usage
+
+def more_help():
+ return """
+More Help
+=========
+
+For more help use the help subcommand:
+
+ rabbitmqadmin help subcommands # For a list of available subcommands
+ rabbitmqadmin help config # For help with the configuration file
+"""
+
+def fmt_usage_stanza(root, verb):
+ def fmt_args(args):
+ res = " ".join(["{0}=...".format(a) for a in args['mandatory']])
+ opts = " ".join("{0}=...".format(o) for o in args['optional'].keys())
+ if opts != "":
+ res += " [{0}]".format(opts)
+ return res
+
+ text = ""
+ if verb != "":
+ verb = " " + verb
+ for k in root.keys():
+ text += " {0} {1} {2}\n".format(verb, k, fmt_args(root[k]))
+ return text
+
+default_options = { "hostname" : "localhost",
+ "port" : "15672",
+ "declare_vhost" : "/",
+ "username" : "guest",
+ "password" : "guest",
+ "ssl" : False,
+ "verbose" : True,
+ "format" : "table",
+ "depth" : 1,
+ "bash_completion" : False }
+
+
+class MyFormatter(TitledHelpFormatter):
+ def format_epilog(self, epilog):
+ return epilog
+
+parser = OptionParser(usage=short_usage(),
+ formatter=MyFormatter(),
+ epilog=more_help())
+
+def make_parser():
+ def add(*args, **kwargs):
+ key = kwargs['dest']
+ if key in default_options:
+ default = " [default: %s]" % default_options[key]
+ kwargs['help'] = kwargs['help'] + default
+ parser.add_option(*args, **kwargs)
+
+ add("-c", "--config", dest="config",
+ help="configuration file [default: ~/.rabbitmqadmin.conf]",
+ metavar="CONFIG")
+ add("-N", "--node", dest="node",
+ help="node described in the configuration file [default: 'default'" + \
+ " only if configuration file is specified]",
+ metavar="NODE")
+ add("-H", "--host", dest="hostname",
+ help="connect to host HOST" ,
+ metavar="HOST")
+ add("-P", "--port", dest="port",
+ help="connect to port PORT",
+ metavar="PORT")
+ add("-V", "--vhost", dest="vhost",
+ help="connect to vhost VHOST [default: all vhosts for list, '/' for declare]",
+ metavar="VHOST")
+ add("-u", "--username", dest="username",
+ help="connect using username USERNAME",
+ metavar="USERNAME")
+ add("-p", "--password", dest="password",
+ help="connect using password PASSWORD",
+ metavar="PASSWORD")
+ add("-q", "--quiet", action="store_false", dest="verbose",
+ help="suppress status messages")
+ add("-s", "--ssl", action="store_true", dest="ssl",
+ help="connect with ssl")
+ add("--ssl-key-file", dest="ssl_key_file",
+ help="PEM format key file for SSL")
+ add("--ssl-cert-file", dest="ssl_cert_file",
+ help="PEM format certificate file for SSL")
+ add("-f", "--format", dest="format",
+ help="format for listing commands - one of [" + ", ".join(FORMATS.keys()) + "]")
+ add("-S", "--sort", dest="sort", help="sort key for listing queries")
+ add("-R", "--sort-reverse", action="store_true", dest="sort_reverse",
+ help="reverse the sort order")
+ add("-d", "--depth", dest="depth",
+ help="maximum depth to recurse for listing tables")
+ add("--bash-completion", action="store_true",
+ dest="bash_completion",
+ help="Print bash completion script")
+ add("--version", action="store_true",
+ dest="version",
+ help="Display version and exit")
+
+def default_config():
+ home = os.getenv('USERPROFILE') or os.getenv('HOME')
+ if home is not None:
+ config_file = home + os.sep + ".rabbitmqadmin.conf"
+ if os.path.isfile(config_file):
+ return config_file
+ return None
+
+def make_configuration():
+ make_parser()
+ (options, args) = parser.parse_args()
+ setattr(options, "declare_vhost", None)
+ if options.version:
+ print_version()
+ if options.config is None:
+ config_file = default_config()
+ if config_file is not None:
+ setattr(options, "config", config_file)
+ else:
+ if not os.path.isfile(options.config):
+ assert_usage(False,
+ "Could not read config file '%s'" % options.config)
+
+ if options.node is None and options.config:
+ options.node = "default"
+ for (key, val) in default_options.items():
+ if getattr(options, key) is None:
+ setattr(options, key, val)
+
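+ # Values from the selected config file section are applied last, so they
+ # take precedence over built-in defaults and over command-line flags.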
+ if options.config is not None:
+ config = ConfigParser()
+ try:
+ config.read(options.config)
+ new_conf = dict(config.items(options.node))
+ except NoSectionError, error:
+ if options.node == "default":
+ pass
+ else:
+ assert_usage(False, ("Could not read section '%s' in config file" +
+ " '%s':\n %s") %
+ (options.node, options.config, error))
+ else:
+ for key, val in new_conf.items():
+ setattr(options, key, val)
+
+ return (options, args)
+
+def assert_usage(expr, error):
+ if not expr:
+ output("\nERROR: {0}\n".format(error))
+ output("{0} --help for help\n".format(os.path.basename(sys.argv[0])))
+ sys.exit(1)
+
+def print_version():
+ output("rabbitmqadmin {0}".format(VERSION))
+ sys.exit(0)
+
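+# Columns named in PROMOTE_COLUMNS sort first, in that order; all other
+# columns follow alphabetically.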
+def column_sort_key(col):
+ if col in PROMOTE_COLUMNS:
+ return (1, PROMOTE_COLUMNS.index(col))
+ else:
+ return (2, col)
+
+def main():
+ (options, args) = make_configuration()
+ if options.bash_completion:
+ print_bash_completion()
+ exit(0)
+ assert_usage(len(args) > 0, 'Action not specified')
+ mgmt = Management(options, args[1:])
+ mode = "invoke_" + args[0]
+ assert_usage(hasattr(mgmt, mode),
+ 'Action {0} not understood'.format(args[0]))
+ method = getattr(mgmt, "invoke_%s" % args[0])
+ method()
+
+def output(s):
+ print maybe_utf8(s, sys.stdout)
+
+def die(s):
+ sys.stderr.write(maybe_utf8("*** {0}\n".format(s), sys.stderr))
+ exit(1)
+
+def maybe_utf8(s, stream):
+ if stream.isatty():
+ # It will have an encoding, which Python will respect
+ return s
+ else:
+ # It won't have an encoding, and Python will pick ASCII by default
+ return s.encode('utf-8')
+
+class Management:
+ def __init__(self, options, args):
+ self.options = options
+ self.args = args
+
+ def get(self, path):
+ return self.http("GET", "/api%s" % path, "")
+
+ def put(self, path, body):
+ return self.http("PUT", "/api%s" % path, body)
+
+ def post(self, path, body):
+ return self.http("POST", "/api%s" % path, body)
+
+ def delete(self, path):
+ return self.http("DELETE", "/api%s" % path, "")
+
+ def http(self, method, path, body):
+ if self.options.ssl:
+ conn = httplib.HTTPSConnection(self.options.hostname,
+ self.options.port,
+ self.options.ssl_key_file,
+ self.options.ssl_cert_file)
+ else:
+ conn = httplib.HTTPConnection(self.options.hostname,
+ self.options.port)
+ headers = {"Authorization":
+ "Basic " + base64.b64encode(self.options.username + ":" +
+ self.options.password)}
+ if body != "":
+ headers["Content-Type"] = "application/json"
+ try:
+ conn.request(method, path, body, headers)
+ except socket.error, e:
+ die("Could not connect: {0}".format(e))
+ resp = conn.getresponse()
+ if resp.status == 400:
+ die(json.loads(resp.read())['reason'])
+ if resp.status == 401:
+ die("Access refused: {0}".format(path))
+ if resp.status == 404:
+ die("Not found: {0}".format(path))
+ if resp.status == 301:
+ url = urlparse.urlparse(resp.getheader('location'))
+ [host, port] = url.netloc.split(':')
+ self.options.hostname = host
+ self.options.port = int(port)
+ return self.http(method, url.path + '?' + url.query, body)
+ if resp.status < 200 or resp.status > 400:
+ raise Exception("Received %d %s for path %s\n%s"
+ % (resp.status, resp.reason, path, resp.read()))
+ return resp.read()
+
+ def verbose(self, string):
+ if self.options.verbose:
+ output(string)
+
+ def get_arg(self):
+ assert_usage(len(self.args) == 1, 'Exactly one argument required')
+ return self.args[0]
+
+ def invoke_help(self):
+ if len(self.args) == 0:
+ parser.print_help()
+ else:
+ help_cmd = self.get_arg()
+ if help_cmd == 'subcommands':
+ usage = subcommands_usage()
+ elif help_cmd == 'config':
+ usage = config_usage()
+ else:
+ assert_usage(False, """help topic must be one of:
+ subcommands
+ config""")
+ print usage
+ exit(0)
+
+ def invoke_publish(self):
+ (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['publish'])
+ upload['properties'] = {} # TODO do we care here?
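+ # If no payload argument was given, read the message body from stdin and
+ # send it base64-encoded.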
+ if not 'payload' in upload:
+ data = sys.stdin.read()
+ upload['payload'] = base64.b64encode(data)
+ upload['payload_encoding'] = 'base64'
+ resp = json.loads(self.post(uri, json.dumps(upload)))
+ if resp['routed']:
+ self.verbose("Message published")
+ else:
+ self.verbose("Message published but NOT routed")
+
+ def invoke_get(self):
+ (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['get'])
+ payload_file = 'payload_file' in upload and upload['payload_file'] or None
+ assert_usage(not payload_file or upload['count'] == '1',
+ 'Cannot get multiple messages using payload_file')
+ result = self.post(uri, json.dumps(upload))
+ if payload_file:
+ write_payload_file(payload_file, result)
+ columns = ['routing_key', 'exchange', 'message_count',
+ 'payload_bytes', 'redelivered']
+ format_list(result, columns, {}, self.options)
+ else:
+ format_list(result, [], {}, self.options)
+
+ def invoke_export(self):
+ path = self.get_arg()
+ definitions = self.get("/definitions")
+ f = open(path, 'w')
+ f.write(definitions)
+ f.close()
+ self.verbose("Exported definitions for %s to \"%s\""
+ % (self.options.hostname, path))
+
+ def invoke_import(self):
+ path = self.get_arg()
+ f = open(path, 'r')
+ definitions = f.read()
+ f.close()
+ self.post("/definitions", definitions)
+ self.verbose("Imported definitions for %s from \"%s\""
+ % (self.options.hostname, path))
+
+ def invoke_list(self):
+ cols = self.args[1:]
+ (uri, obj_info) = self.list_show_uri(LISTABLE, 'list', cols)
+ format_list(self.get(uri), cols, obj_info, self.options)
+
+ def invoke_show(self):
+ cols = self.args[1:]
+ (uri, obj_info) = self.list_show_uri(SHOWABLE, 'show', cols)
+ format_list('[{0}]'.format(self.get(uri)), cols, obj_info, self.options)
+
+ def list_show_uri(self, obj_types, verb, cols):
+ obj_type = self.args[0]
+ assert_usage(obj_type in obj_types,
+ "Don't know how to {0} {1}".format(verb, obj_type))
+ obj_info = obj_types[obj_type]
+ uri = "/%s" % obj_type
+ query = []
+ if obj_info['vhost'] and self.options.vhost:
+ uri += "/%s" % urllib.quote_plus(self.options.vhost)
+ if cols != []:
+ query.append("columns=" + ",".join(cols))
+ sort = self.options.sort
+ if sort:
+ query.append("sort=" + sort)
+ if self.options.sort_reverse:
+ query.append("sort_reverse=true")
+ query = "&".join(query)
+ if query != "":
+ uri += "?" + query
+ return (uri, obj_info)
+
+ def invoke_declare(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(DECLARABLE)
+ if obj_type == 'binding':
+ self.post(uri, json.dumps(upload))
+ else:
+ self.put(uri, json.dumps(upload))
+ self.verbose("{0} declared".format(obj_type))
+
+ def invoke_delete(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(DELETABLE)
+ self.delete(uri)
+ self.verbose("{0} deleted".format(obj_type))
+
+ def invoke_close(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(CLOSABLE)
+ self.delete(uri)
+ self.verbose("{0} closed".format(obj_type))
+
+ def invoke_purge(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(PURGABLE)
+ self.delete(uri)
+ self.verbose("{0} purged".format(obj_type))
+
+ def declare_delete_parse(self, root):
+ assert_usage(len(self.args) > 0, 'Type not specified')
+ obj_type = self.args[0]
+ assert_usage(obj_type in root,
+ 'Type {0} not recognised'.format(obj_type))
+ obj = root[obj_type]
+ (uri, upload) = self.parse_args(self.args[1:], obj)
+ return (obj_type, uri, upload)
+
+ def parse_args(self, args, obj):
+ mandatory = obj['mandatory']
+ optional = obj['optional']
+ uri_template = obj['uri']
+ upload = {}
+ for k in optional.keys():
+ if optional[k]:
+ upload[k] = optional[k]
+ for arg in args:
+ assert_usage("=" in arg,
+ 'Argument "{0}" not in format name=value'.format(arg))
+ (name, value) = arg.split("=", 1)
+ assert_usage(name in mandatory or name in optional.keys(),
+ 'Argument "{0}" not recognised'.format(name))
+ if 'json' in obj and name in obj['json']:
+ upload[name] = self.parse_json(value)
+ else:
+ upload[name] = value
+ for m in mandatory:
+ assert_usage(m in upload.keys(),
+ 'mandatory argument "{0}" required'.format(m))
+ if 'vhost' not in mandatory:
+ upload['vhost'] = self.options.vhost or self.options.declare_vhost
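+ # Build the request URI: URL-quote every string value, and derive
+ # destination_char ('q' or 'e') from destination_type for binding URIs.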
+ uri_args = {}
+ for k in upload:
+ v = upload[k]
+ if v and isinstance(v, basestring):
+ uri_args[k] = urllib.quote_plus(v)
+ if k == 'destination_type':
+ uri_args['destination_char'] = v[0]
+ uri = uri_template.format(**uri_args)
+ return (uri, upload)
+
+ def parse_json(self, text):
+ try:
+ return json.loads(text)
+ except ValueError:
+ print "Could not parse JSON:\n {0}".format(text)
+ sys.exit(1)
+
+def format_list(json_list, columns, args, options):
+ format = options.format
+ formatter = None
+ if format == "raw_json":
+ output(json_list)
+ return
+ elif format == "pretty_json":
+ enc = json.JSONEncoder(False, False, True, True, True, 2)
+ output(enc.encode(json.loads(json_list)))
+ return
+ else:
+ formatter = FORMATS[format]
+ assert_usage(formatter != None,
+ "Format {0} not recognised".format(format))
+ formatter_instance = formatter(columns, args, options)
+ formatter_instance.display(json_list)
+
+class Lister:
+ def verbose(self, string):
+ if self.options.verbose:
+ output(string)
+
+ def display(self, json_list):
+ depth = sys.maxint
+ if len(self.columns) == 0:
+ depth = int(self.options.depth)
+ (columns, table) = self.list_to_table(json.loads(json_list), depth)
+ if len(table) > 0:
+ self.display_list(columns, table)
+ else:
+ self.verbose("No items")
+
+ def list_to_table(self, items, max_depth):
+ columns = {}
+ column_ix = {}
+ row = None
+ table = []
+
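+ # Flatten nested dictionaries into dot-separated column names, recursing no
+ # deeper than max_depth; lists of strings are joined with spaces, any other
+ # list is rendered as JSON.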
+ def add(prefix, depth, item, fun):
+ for key in item:
+ column = prefix == '' and key or (prefix + '.' + key)
+ subitem = item[key]
+ if type(subitem) == dict:
+ if self.obj_info.has_key('json') and key in self.obj_info['json']:
+ fun(column, json.dumps(subitem))
+ else:
+ if depth < max_depth:
+ add(column, depth + 1, subitem, fun)
+ elif type(subitem) == list:
+ # The first branch has mirror nodes in queues in
+ # mind (which come out looking decent); the second
+ # one has applications in nodes (which look less
+ # so, but what would look good?).
+ if [x for x in subitem if type(x) != unicode] == []:
+ serialised = " ".join(subitem)
+ else:
+ serialised = json.dumps(subitem)
+ fun(column, serialised)
+ else:
+ fun(column, subitem)
+
+ def add_to_columns(col, val):
+ columns[col] = True
+
+ def add_to_row(col, val):
+ if col in column_ix:
+ row[column_ix[col]] = unicode(val)
+
+ if len(self.columns) == 0:
+ for item in items:
+ add('', 1, item, add_to_columns)
+ columns = columns.keys()
+ columns.sort(key=column_sort_key)
+ else:
+ columns = self.columns
+
+ for i in xrange(0, len(columns)):
+ column_ix[columns[i]] = i
+ for item in items:
+ row = len(columns) * ['']
+ add('', 1, item, add_to_row)
+ table.append(row)
+
+ return (columns, table)
+
+class TSVList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ head = "\t".join(columns)
+ self.verbose(head)
+
+ for row in table:
+ line = "\t".join(row)
+ output(line)
+
+class LongList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ sep = "\n" + "-" * 80 + "\n"
+ max_width = 0
+ for col in columns:
+ max_width = max(max_width, len(col))
+ fmt = "{0:>" + unicode(max_width) + "}: {1}"
+ output(sep)
+ for i in xrange(0, len(table)):
+ for j in xrange(0, len(columns)):
+ output(fmt.format(columns[j], table[i][j]))
+ output(sep)
+
+class TableList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ total = [columns]
+ total.extend(table)
+ self.ascii_table(total)
+
+ def ascii_table(self, rows):
+ table = ""
+ col_widths = [0] * len(rows[0])
+ for i in xrange(0, len(rows[0])):
+ for j in xrange(0, len(rows)):
+ col_widths[i] = max(col_widths[i], len(rows[j][i]))
+ self.ascii_bar(col_widths)
+ self.ascii_row(col_widths, rows[0], "^")
+ self.ascii_bar(col_widths)
+ for row in rows[1:]:
+ self.ascii_row(col_widths, row, "<")
+ self.ascii_bar(col_widths)
+
+ def ascii_row(self, col_widths, row, align):
+ txt = "|"
+ for i in xrange(0, len(col_widths)):
+ fmt = " {0:" + align + unicode(col_widths[i]) + "} "
+ txt += fmt.format(row[i]) + "|"
+ output(txt)
+
+ def ascii_bar(self, col_widths):
+ txt = "+"
+ for w in col_widths:
+ txt += ("-" * (w + 2)) + "+"
+ output(txt)
+
+class KeyValueList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ for i in xrange(0, len(table)):
+ row = []
+ for j in xrange(0, len(columns)):
+ row.append("{0}=\"{1}\"".format(columns[j], table[i][j]))
+ output(" ".join(row))
+
+# TODO handle spaces etc in completable names
+class BashList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ ix = None
+ for i in xrange(0, len(columns)):
+ if columns[i] == 'name':
+ ix = i
+ if ix is not None:
+ res = []
+ for row in table:
+ res.append(row[ix])
+ output(" ".join(res))
+
+FORMATS = {
+ 'raw_json' : None, # Special cased
+ 'pretty_json' : None, # Ditto
+ 'tsv' : TSVList,
+ 'long' : LongList,
+ 'table' : TableList,
+ 'kvp' : KeyValueList,
+ 'bash' : BashList
+}
+
+def write_payload_file(payload_file, json_list):
+ result = json.loads(json_list)[0]
+ payload = result['payload']
+ payload_encoding = result['payload_encoding']
+ f = open(payload_file, 'w')
+ if payload_encoding == 'base64':
+ data = base64.b64decode(payload)
+ else:
+ data = payload
+ f.write(data)
+ f.close()
+
+def print_bash_completion():
+ script = """# This is a bash completion script for rabbitmqadmin.
+# Redirect it to a file, then source it or copy it to /etc/bash_completion.d
+# to get tab completion. rabbitmqadmin must be on your PATH for this to work.
+_rabbitmqadmin()
+{
+ local cur prev opts base
+ COMPREPLY=()
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ prev="${COMP_WORDS[COMP_CWORD-1]}"
+
+ opts="list show declare delete close purge import export get publish help"
+ fargs="--help --host --port --vhost --username --password --format --depth --sort --sort-reverse"
+
+ case "${prev}" in
+ list)
+ COMPREPLY=( $(compgen -W '""" + " ".join(LISTABLE) + """' -- ${cur}) )
+ return 0
+ ;;
+ show)
+ COMPREPLY=( $(compgen -W '""" + " ".join(SHOWABLE) + """' -- ${cur}) )
+ return 0
+ ;;
+ declare)
+ COMPREPLY=( $(compgen -W '""" + " ".join(DECLARABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ delete)
+ COMPREPLY=( $(compgen -W '""" + " ".join(DELETABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ close)
+ COMPREPLY=( $(compgen -W '""" + " ".join(CLOSABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ purge)
+ COMPREPLY=( $(compgen -W '""" + " ".join(PURGABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ export)
+ COMPREPLY=( $(compgen -f ${cur}) )
+ return 0
+ ;;
+ import)
+ COMPREPLY=( $(compgen -f ${cur}) )
+ return 0
+ ;;
+ help)
+ opts="subcommands config"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -H)
+ COMPREPLY=( $(compgen -A hostname ${cur}) )
+ return 0
+ ;;
+ --host)
+ COMPREPLY=( $(compgen -A hostname ${cur}) )
+ return 0
+ ;;
+ -V)
+ opts="$(rabbitmqadmin -q -f bash list vhosts)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ --vhost)
+ opts="$(rabbitmqadmin -q -f bash list vhosts)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -u)
+ opts="$(rabbitmqadmin -q -f bash list users)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ --username)
+ opts="$(rabbitmqadmin -q -f bash list users)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -f)
+ COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
+ return 0
+ ;;
+ --format)
+ COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
+ return 0
+ ;;
+
+"""
+ for l in LISTABLE:
+ key = l[0:len(l) - 1]
+ script += " " + key + """)
+ opts="$(rabbitmqadmin -q -f bash list """ + l + """)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+"""
+ script += """ *)
+ ;;
+ esac
+
+ COMPREPLY=($(compgen -W "${opts} ${fargs}" -- ${cur}))
+ return 0
+}
+complete -F _rabbitmqadmin rabbitmqadmin
+"""
+ output(script)
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl b/deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl
new file mode 100644
index 0000000000..2f56f56189
--- /dev/null
+++ b/deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+
+-module(term_to_binary_compat_prop_SUITE).
+
+-compile(export_all).
+
+-include("rabbit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+
+-define(ITERATIONS_TO_RUN_UNTIL_CONFIDENT, 10000).
+
+all() ->
+ [
+ ensure_term_to_binary_defaults_to_version_1,
+ term_to_binary_latin_atom,
+ queue_name_to_binary
+ ].
+
+erts_gt_8() ->
+ Vsn = erlang:system_info(version),
+ [Maj|_] = string:tokens(Vsn, "."),
+ list_to_integer(Maj) > 8.
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+%% R16B03 defaults term_to_binary to minor version 0, so this test would always fail there
+ensure_term_to_binary_defaults_to_version_1(Config) ->
+ CurrentERTS = erlang:system_info(version),
+ MinimumTestedERTS = "6.0",
+ case rabbit_misc:version_compare(CurrentERTS, MinimumTestedERTS, gte) of
+ true ->
+ Property = fun () ->
+ prop_ensure_term_to_binary_defaults_to_version_1(Config)
+ end,
+ rabbit_ct_proper_helpers:run_proper(
+ Property, [],
+ ?ITERATIONS_TO_RUN_UNTIL_CONFIDENT);
+ false ->
+ ct:pal(
+ ?LOW_IMPORTANCE,
+ "This test require ERTS ~p or above, running on ~p~n"
+ "Skipping test...",
+ [MinimumTestedERTS, CurrentERTS])
+ end.
+
+prop_ensure_term_to_binary_defaults_to_version_1(_Config) ->
+ ?FORALL(Term, any(),
+ begin
+ Current = term_to_binary(Term),
+ Compat = term_to_binary_compat:term_to_binary_1(Term),
+ Current =:= Compat
+ end).
+
+term_to_binary_latin_atom(Config) ->
+ Property = fun () -> prop_term_to_binary_latin_atom(Config) end,
+ rabbit_ct_proper_helpers:run_proper(Property, [],
+ ?ITERATIONS_TO_RUN_UNTIL_CONFIDENT).
+
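+%% External term format: 131 is the version byte, 100 the ATOM_EXT tag,
+%% followed by a 16-bit big-endian length and the Latin-1 bytes of the atom.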
+prop_term_to_binary_latin_atom(_Config) ->
+ ?FORALL(LatinString, list(integer(0, 255)),
+ begin
+ Length = length(LatinString),
+ Atom = list_to_atom(LatinString),
+ Binary = list_to_binary(LatinString),
+ <<131,100, Length:16, Binary/binary>> =:=
+ term_to_binary_compat:term_to_binary_1(Atom)
+ end).
+
+queue_name_to_binary(Config) ->
+ Property = fun () -> prop_queue_name_to_binary(Config) end,
+ rabbit_ct_proper_helpers:run_proper(Property, [],
+ ?ITERATIONS_TO_RUN_UNTIL_CONFIDENT).
+
+
+prop_queue_name_to_binary(_Config) ->
+ ?FORALL({VHost, QName}, {binary(), binary()},
+ begin
+ VHostBSize = byte_size(VHost),
+ NameBSize = byte_size(QName),
+ Expected =
+ <<131, %% Binary format "version"
+ 104, 4, %% 4-element tuple
+ 100, 0, 8, "resource", %% `resource` atom
+ 109, VHostBSize:32, VHost/binary, %% Vhost binary
+ 100, 0, 5, "queue", %% `queue` atom
+ 109, NameBSize:32, QName/binary>>, %% Name binary
+ Resource = rabbit_misc:r(VHost, queue, QName),
+ Current = term_to_binary_compat:term_to_binary_1(Resource),
+ Current =:= Expected
+ end).
diff --git a/deps/rabbit/test/test_util.erl b/deps/rabbit/test/test_util.erl
new file mode 100644
index 0000000000..9a82b0ea1c
--- /dev/null
+++ b/deps/rabbit/test/test_util.erl
@@ -0,0 +1,28 @@
+-module(test_util).
+
+-export([
+ fake_pid/1
+ ]).
+
+
+fake_pid(Node) ->
+ NodeBin = rabbit_data_coercion:to_binary(Node),
+ ThisNodeSize = size(term_to_binary(node())) + 1,
+ Pid = spawn(fun () -> ok end),
+ %% drop the local node data from a local pid
+ <<Pre:ThisNodeSize/binary, LocalPidData/binary>> = term_to_binary(Pid),
+ S = size(NodeBin),
+ %% get the encoding type of the pid
+ <<_:8, Type:8/unsigned, _/binary>> = Pre,
+ %% replace it with the incoming node binary
+ Final = <<131, Type, 100, S:16/unsigned, NodeBin/binary, LocalPidData/binary>>,
+ binary_to_term(Final).
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+fake_pid_test() ->
+ _ = fake_pid(banana),
+ ok.
+
+-endif.
diff --git a/deps/rabbit/test/topic_permission_SUITE.erl b/deps/rabbit/test/topic_permission_SUITE.erl
new file mode 100644
index 0000000000..2f123fd7f6
--- /dev/null
+++ b/deps/rabbit/test/topic_permission_SUITE.erl
@@ -0,0 +1,244 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(topic_permission_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() -> [
+ {sequential_tests, [], [
+ topic_permission_database_access,
+ topic_permission_checks
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(Testcase, Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, clear_tables, []),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+clear_tables() ->
+ {atomic, ok} = mnesia:clear_table(rabbit_topic_permission),
+ {atomic, ok} = mnesia:clear_table(rabbit_vhost),
+ {atomic, ok} = mnesia:clear_table(rabbit_user),
+ ok.
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+topic_permission_database_access(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, topic_permission_database_access1, [Config]).
+
+topic_permission_database_access1(_Config) ->
+ 0 = length(ets:tab2list(rabbit_topic_permission)),
+ rabbit_vhost:add(<<"/">>, <<"acting-user">>),
+ rabbit_vhost:add(<<"other-vhost">>, <<"acting-user">>),
+ rabbit_auth_backend_internal:add_user(<<"guest">>, <<"guest">>, <<"acting-user">>),
+ rabbit_auth_backend_internal:add_user(<<"dummy">>, <<"dummy">>, <<"acting-user">>),
+
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"/">>, <<"amq.topic">>, "^a", "^a", <<"acting-user">>
+ ),
+ 1 = length(ets:tab2list(rabbit_topic_permission)),
+ 1 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)),
+ 0 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"other-vhost">>)),
+ 1 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"/">>)),
+ 0 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"other-vhost">>)),
+ 1 = length(rabbit_auth_backend_internal:list_topic_permissions()),
+
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"other-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">>
+ ),
+ 2 = length(ets:tab2list(rabbit_topic_permission)),
+ 2 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"other-vhost">>)),
+ 1 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"/">>)),
+ 1 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"other-vhost">>)),
+ 2 = length(rabbit_auth_backend_internal:list_topic_permissions()),
+
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"/">>, <<"topic1">>, "^a", "^a", <<"acting-user">>
+ ),
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"/">>, <<"topic2">>, "^a", "^a", <<"acting-user">>
+ ),
+
+ 4 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ 3 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"/">>)),
+ 1 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"other-vhost">>)),
+ 4 = length(rabbit_auth_backend_internal:list_topic_permissions()),
+
+ rabbit_auth_backend_internal:clear_topic_permissions(<<"guest">>, <<"other-vhost">>,
+ <<"acting-user">>),
+ 0 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"other-vhost">>)),
+ 3 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ rabbit_auth_backend_internal:clear_topic_permissions(<<"guest">>, <<"/">>, <<"topic1">>,
+ <<"acting-user">>),
+ 2 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ rabbit_auth_backend_internal:clear_topic_permissions(<<"guest">>, <<"/">>,
+ <<"acting-user">>),
+ 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+
+
+ {error, {no_such_user, _}} = (catch rabbit_auth_backend_internal:set_topic_permissions(
+ <<"non-existing-user">>, <<"other-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">>
+ )),
+
+ {error, {no_such_vhost, _}} = (catch rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"non-existing-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">>
+ )),
+
+ {error, {no_such_user, _}} = (catch rabbit_auth_backend_internal:set_topic_permissions(
+ <<"non-existing-user">>, <<"non-existing-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">>
+ )),
+
+ {error, {no_such_user, _}} = (catch rabbit_auth_backend_internal:list_user_topic_permissions(
+ "non-existing-user"
+ )),
+
+ {error, {no_such_vhost, _}} = (catch rabbit_auth_backend_internal:list_vhost_topic_permissions(
+ "non-existing-vhost"
+ )),
+
+ {error, {invalid_regexp, _, _}} = (catch rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"/">>, <<"amq.topic">>, "[", "^a", <<"acting-user">>
+ )),
+ ok.
+
+topic_permission_checks(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, topic_permission_checks1, [Config]).
+
+topic_permission_checks1(_Config) ->
+ 0 = length(ets:tab2list(rabbit_topic_permission)),
+ rabbit_misc:execute_mnesia_transaction(fun() ->
+ ok = mnesia:write(rabbit_vhost,
+ vhost:new(<<"/">>, []),
+ write),
+ ok = mnesia:write(rabbit_vhost,
+ vhost:new(<<"other-vhost">>, []),
+ write)
+ end),
+ rabbit_auth_backend_internal:add_user(<<"guest">>, <<"guest">>, <<"acting-user">>),
+ rabbit_auth_backend_internal:add_user(<<"dummy">>, <<"dummy">>, <<"acting-user">>),
+
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"/">>, <<"amq.topic">>, "^a", "^a", <<"acting-user">>
+ ),
+ 1 = length(ets:tab2list(rabbit_topic_permission)),
+ 1 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)),
+ 0 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"other-vhost">>)),
+
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"other-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">>
+ ),
+ 2 = length(ets:tab2list(rabbit_topic_permission)),
+ 2 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"other-vhost">>)),
+
+ User = #auth_user{username = <<"guest">>},
+ Topic = #resource{name = <<"amq.topic">>, virtual_host = <<"/">>,
+ kind = topic},
+ Context = #{routing_key => <<"a.b.c">>},
+ Permissions = [write, read],
+ %% user has access to exchange, routing key matches
+ [true = rabbit_auth_backend_internal:check_topic_access(
+ User,
+ Topic,
+ Perm,
+ Context
+ ) || Perm <- Permissions],
+ %% user has access to exchange, routing key does not match
+ [false = rabbit_auth_backend_internal:check_topic_access(
+ User,
+ Topic,
+ Perm,
+ #{routing_key => <<"x.y.z">>}
+ ) || Perm <- Permissions],
+ %% user has access to exchange but not on this vhost
+ %% let pass when there's no match
+ [true = rabbit_auth_backend_internal:check_topic_access(
+ User,
+ Topic#resource{virtual_host = <<"fancyvhost">>},
+ Perm,
+ Context
+ ) || Perm <- Permissions],
+ %% user does not have access to exchange
+ %% let pass when there's no match
+ [true = rabbit_auth_backend_internal:check_topic_access(
+ #auth_user{username = <<"dummy">>},
+ Topic,
+ Perm,
+ Context
+ ) || Perm <- Permissions],
+
+ %% expand variables
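+ %% {vhost} and {username} placeholders in the permission pattern are filled
+ %% in from the variable_map of the authorisation context before matching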
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"other-vhost">>, <<"amq.topic">>,
+ "services.{vhost}.accounts.{username}.notifications",
+ "services.{vhost}.accounts.{username}.notifications", <<"acting-user">>
+ ),
+ %% routing key OK
+ [true = rabbit_auth_backend_internal:check_topic_access(
+ User,
+ Topic#resource{virtual_host = <<"other-vhost">>},
+ Perm,
+ #{routing_key => <<"services.other-vhost.accounts.guest.notifications">>,
+ variable_map => #{
+ <<"username">> => <<"guest">>,
+ <<"vhost">> => <<"other-vhost">>
+ }
+ }
+ ) || Perm <- Permissions],
+ %% routing key KO
+ [false = rabbit_auth_backend_internal:check_topic_access(
+ User,
+ Topic#resource{virtual_host = <<"other-vhost">>},
+ Perm,
+ #{routing_key => <<"services.default.accounts.dummy.notifications">>,
+ variable_map => #{
+ <<"username">> => <<"guest">>,
+ <<"vhost">> => <<"other-vhost">>
+ }
+ }
+ ) || Perm <- Permissions],
+
+ ok.
diff --git a/deps/rabbit/test/unit_access_control_SUITE.erl b/deps/rabbit/test/unit_access_control_SUITE.erl
new file mode 100644
index 0000000000..af8f481083
--- /dev/null
+++ b/deps/rabbit/test/unit_access_control_SUITE.erl
@@ -0,0 +1,445 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_access_control_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests},
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ password_hashing,
+ unsupported_connection_refusal
+ ]},
+ {sequential_tests, [], [
+ login_with_credentials_but_no_password,
+ login_of_passwordless_user,
+ set_tags_for_passwordless_user,
+ change_password,
+ topic_matching,
+ auth_backend_internal_expand_topic_permission,
+ rabbit_direct_extract_extra_auth_props
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% ---------------------------------------------------------------------------
+%% Test Cases
+%% ---------------------------------------------------------------------------
+
+password_hashing(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, password_hashing1, [Config]).
+
+password_hashing1(_Config) ->
+ rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(),
+ application:set_env(rabbit, password_hashing_module,
+ rabbit_password_hashing_md5),
+ rabbit_password_hashing_md5 = rabbit_password:hashing_mod(),
+ application:set_env(rabbit, password_hashing_module,
+ rabbit_password_hashing_sha256),
+ rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(),
+
+ rabbit_password_hashing_sha256 =
+ rabbit_password:hashing_mod(rabbit_password_hashing_sha256),
+ rabbit_password_hashing_md5 =
+ rabbit_password:hashing_mod(rabbit_password_hashing_md5),
+ rabbit_password_hashing_md5 =
+ rabbit_password:hashing_mod(undefined),
+
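+ %% users with no recorded hashing algorithm (legacy records) fall back to MD5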
+ rabbit_password_hashing_md5 =
+ rabbit_auth_backend_internal:hashing_module_for_user(
+ internal_user:new()),
+ rabbit_password_hashing_md5 =
+ rabbit_auth_backend_internal:hashing_module_for_user(
+ internal_user:new({hashing_algorithm, undefined})),
+ rabbit_password_hashing_md5 =
+ rabbit_auth_backend_internal:hashing_module_for_user(
+ internal_user:new({hashing_algorithm, rabbit_password_hashing_md5})),
+
+ rabbit_password_hashing_sha256 =
+ rabbit_auth_backend_internal:hashing_module_for_user(
+ internal_user:new({hashing_algorithm, rabbit_password_hashing_sha256})),
+
+ passed.
+
+change_password(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, change_password1, [Config]).
+
+change_password1(_Config) ->
+ UserName = <<"test_user">>,
+ Password = <<"test_password">>,
+ case rabbit_auth_backend_internal:lookup_user(UserName) of
+ {ok, _} -> rabbit_auth_backend_internal:delete_user(UserName, <<"acting-user">>);
+ _ -> ok
+ end,
+ ok = application:set_env(rabbit, password_hashing_module,
+ rabbit_password_hashing_md5),
+ ok = rabbit_auth_backend_internal:add_user(UserName, Password, <<"acting-user">>),
+ {ok, #auth_user{username = UserName}} =
+ rabbit_auth_backend_internal:user_login_authentication(
+ UserName, [{password, Password}]),
+ ok = application:set_env(rabbit, password_hashing_module,
+ rabbit_password_hashing_sha256),
+ {ok, #auth_user{username = UserName}} =
+ rabbit_auth_backend_internal:user_login_authentication(
+ UserName, [{password, Password}]),
+
+ NewPassword = <<"test_password1">>,
+ ok = rabbit_auth_backend_internal:change_password(UserName, NewPassword,
+ <<"acting-user">>),
+ {ok, #auth_user{username = UserName}} =
+ rabbit_auth_backend_internal:user_login_authentication(
+ UserName, [{password, NewPassword}]),
+
+ {refused, _, [UserName]} =
+ rabbit_auth_backend_internal:user_login_authentication(
+ UserName, [{password, Password}]),
+ passed.
+
+
+login_with_credentials_but_no_password(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, login_with_credentials_but_no_password1, [Config]).
+
+login_with_credentials_but_no_password1(_Config) ->
+ Username = <<"login_with_credentials_but_no_password-user">>,
+ Password = <<"login_with_credentials_but_no_password-password">>,
+ ok = rabbit_auth_backend_internal:add_user(Username, Password, <<"acting-user">>),
+
+ try
+ rabbit_auth_backend_internal:user_login_authentication(Username,
+ [{key, <<"value">>}]),
+ ?assert(false)
+ catch exit:{unknown_auth_props, Username, [{key, <<"value">>}]} ->
+ ok
+ end,
+
+ ok = rabbit_auth_backend_internal:delete_user(Username, <<"acting-user">>),
+
+ passed.
+
+%% passwordless users are not supposed to be used with
+%% this backend (or with the PLAIN authentication mechanism in general)
+login_of_passwordless_user(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, login_of_passwordless_user1, [Config]).
+
+login_of_passwordless_user1(_Config) ->
+ Username = <<"login_of_passwordless_user-user">>,
+ Password = <<"">>,
+ ok = rabbit_auth_backend_internal:add_user(Username, Password, <<"acting-user">>),
+
+ ?assertMatch(
+ {refused, _Message, [Username]},
+ rabbit_auth_backend_internal:user_login_authentication(Username,
+ [{password, <<"">>}])),
+
+ ?assertMatch(
+ {refused, _Format, [Username]},
+ rabbit_auth_backend_internal:user_login_authentication(Username,
+ [{password, ""}])),
+
+ ok = rabbit_auth_backend_internal:delete_user(Username, <<"acting-user">>),
+
+ passed.
+
+
+set_tags_for_passwordless_user(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, set_tags_for_passwordless_user1, [Config]).
+
+set_tags_for_passwordless_user1(_Config) ->
+ Username = <<"set_tags_for_passwordless_user">>,
+ Password = <<"set_tags_for_passwordless_user">>,
+ ok = rabbit_auth_backend_internal:add_user(Username, Password,
+ <<"acting-user">>),
+ ok = rabbit_auth_backend_internal:clear_password(Username,
+ <<"acting-user">>),
+ ok = rabbit_auth_backend_internal:set_tags(Username, [management],
+ <<"acting-user">>),
+
+ {ok, User1} = rabbit_auth_backend_internal:lookup_user(Username),
+ ?assertEqual([management], internal_user:get_tags(User1)),
+
+ ok = rabbit_auth_backend_internal:set_tags(Username, [management, policymaker],
+ <<"acting-user">>),
+
+ {ok, User2} = rabbit_auth_backend_internal:lookup_user(Username),
+ ?assertEqual([management, policymaker], internal_user:get_tags(User2)),
+
+ ok = rabbit_auth_backend_internal:set_tags(Username, [],
+ <<"acting-user">>),
+
+ {ok, User3} = rabbit_auth_backend_internal:lookup_user(Username),
+ ?assertEqual([], internal_user:get_tags(User3)),
+
+ ok = rabbit_auth_backend_internal:delete_user(Username,
+ <<"acting-user">>),
+
+ passed.
+
+
+rabbit_direct_extract_extra_auth_props(_Config) ->
+ {ok, CSC} = code_server_cache:start_link(),
+ % no protocol to extract
+ [] = rabbit_direct:extract_extra_auth_props(
+ {<<"guest">>, <<"guest">>}, <<"/">>, 1,
+ [{name,<<"127.0.0.1:52366 -> 127.0.0.1:1883">>}]),
+ % protocol to extract, but no module to call
+ [] = rabbit_direct:extract_extra_auth_props(
+ {<<"guest">>, <<"guest">>}, <<"/">>, 1,
+ [{protocol, {'PROTOCOL_WITHOUT_MODULE', "1.0"}}]),
+ % see rabbit_dummy_protocol_connection_info module
+ % protocol to extract, module that returns a client ID
+ [{client_id, <<"DummyClientId">>}] = rabbit_direct:extract_extra_auth_props(
+ {<<"guest">>, <<"guest">>}, <<"/">>, 1,
+ [{protocol, {'DUMMY_PROTOCOL', "1.0"}}]),
+ % protocol to extract, but error thrown in module
+ [] = rabbit_direct:extract_extra_auth_props(
+ {<<"guest">>, <<"guest">>}, <<"/">>, -1,
+ [{protocol, {'DUMMY_PROTOCOL', "1.0"}}]),
+ gen_server:stop(CSC),
+ ok.
+
+auth_backend_internal_expand_topic_permission(_Config) ->
+ ExpandMap = #{<<"username">> => <<"guest">>, <<"vhost">> => <<"default">>},
+ %% simple case
+ <<"services/default/accounts/guest/notifications">> =
+ rabbit_auth_backend_internal:expand_topic_permission(
+ <<"services/{vhost}/accounts/{username}/notifications">>,
+ ExpandMap
+ ),
+ %% replace variable twice
+ <<"services/default/accounts/default/guest/notifications">> =
+ rabbit_auth_backend_internal:expand_topic_permission(
+ <<"services/{vhost}/accounts/{vhost}/{username}/notifications">>,
+ ExpandMap
+ ),
+ %% nothing to replace
+ <<"services/accounts/notifications">> =
+ rabbit_auth_backend_internal:expand_topic_permission(
+ <<"services/accounts/notifications">>,
+ ExpandMap
+ ),
+ %% the expand map isn't defined
+ <<"services/{vhost}/accounts/{username}/notifications">> =
+ rabbit_auth_backend_internal:expand_topic_permission(
+ <<"services/{vhost}/accounts/{username}/notifications">>,
+ undefined
+ ),
+ %% the expand map is empty
+ <<"services/{vhost}/accounts/{username}/notifications">> =
+ rabbit_auth_backend_internal:expand_topic_permission(
+ <<"services/{vhost}/accounts/{username}/notifications">>,
+ #{}
+ ),
+ ok.
+
+unsupported_connection_refusal(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, unsupported_connection_refusal1, [Config]).
+
+unsupported_connection_refusal1(Config) ->
+ H = ?config(rmq_hostname, Config),
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ [passed = test_unsupported_connection_refusal(H, P, V) ||
+ V <- [<<"AMQP",9,9,9,9>>, <<"AMQP",0,1,0,0>>, <<"XXXX",0,0,9,1>>]],
+ passed.
+
+test_unsupported_connection_refusal(H, P, Header) ->
+ {ok, C} = gen_tcp:connect(H, P, [binary, {active, false}]),
+ ok = gen_tcp:send(C, Header),
+ {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(C, 8, 100),
+ ok = gen_tcp:close(C),
+ passed.
+
+
+%% -------------------------------------------------------------------
+%% Topic matching.
+%% -------------------------------------------------------------------
+
+topic_matching(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, topic_matching1, [Config]).
+
+topic_matching1(_Config) ->
+ XName = #resource{virtual_host = <<"/">>,
+ kind = exchange,
+ name = <<"topic_matching-exchange">>},
+ X0 = #exchange{name = XName, type = topic, durable = false,
+ auto_delete = false, arguments = []},
+ X = rabbit_exchange_decorator:set(X0),
+ %% create
+ rabbit_exchange_type_topic:validate(X),
+ exchange_op_callback(X, create, []),
+
+ %% add some bindings
+ Bindings = [#binding{source = XName,
+ key = list_to_binary(Key),
+ destination = #resource{virtual_host = <<"/">>,
+ kind = queue,
+ name = list_to_binary(Q)},
+ args = Args} ||
+ {Key, Q, Args} <- [{"a.b.c", "t1", []},
+ {"a.*.c", "t2", []},
+ {"a.#.b", "t3", []},
+ {"a.b.b.c", "t4", []},
+ {"#", "t5", []},
+ {"#.#", "t6", []},
+ {"#.b", "t7", []},
+ {"*.*", "t8", []},
+ {"a.*", "t9", []},
+ {"*.b.c", "t10", []},
+ {"a.#", "t11", []},
+ {"a.#.#", "t12", []},
+ {"b.b.c", "t13", []},
+ {"a.b.b", "t14", []},
+ {"a.b", "t15", []},
+ {"b.c", "t16", []},
+ {"", "t17", []},
+ {"*.*.*", "t18", []},
+ {"vodka.martini", "t19", []},
+ {"a.b.c", "t20", []},
+ {"*.#", "t21", []},
+ {"#.*.#", "t22", []},
+ {"*.#.#", "t23", []},
+ {"#.#.#", "t24", []},
+ {"*", "t25", []},
+ {"#.b.#", "t26", []},
+ {"args-test", "t27",
+ [{<<"foo">>, longstr, <<"bar">>}]},
+ {"args-test", "t27", %% Note aliasing
+ [{<<"foo">>, longstr, <<"baz">>}]}]],
+ lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end,
+ Bindings),
+
+ %% test some matches
+ test_topic_expect_match(
+ X, [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12",
+ "t18", "t20", "t21", "t22", "t23", "t24",
+ "t26"]},
+ {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11",
+ "t12", "t15", "t21", "t22", "t23", "t24",
+ "t26"]},
+ {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14",
+ "t18", "t21", "t22", "t23", "t24", "t26"]},
+ {"", ["t5", "t6", "t17", "t24"]},
+ {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23",
+ "t24", "t26"]},
+ {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22",
+ "t23", "t24"]},
+ {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23",
+ "t24"]},
+ {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23",
+ "t24"]},
+ {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21",
+ "t22", "t23", "t24", "t26"]},
+ {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]},
+ {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24",
+ "t25"]},
+ {"args-test", ["t5", "t6", "t21", "t22", "t23", "t24",
+ "t25", "t27"]}]),
+ %% remove some bindings
+ RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings),
+ lists:nth(11, Bindings), lists:nth(19, Bindings),
+ lists:nth(21, Bindings), lists:nth(28, Bindings)],
+ exchange_op_callback(X, remove_bindings, [RemovedBindings]),
+ RemainingBindings = ordsets:to_list(
+ ordsets:subtract(ordsets:from_list(Bindings),
+ ordsets:from_list(RemovedBindings))),
+
+ %% test some matches
+ test_topic_expect_match(
+ X,
+ [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22",
+ "t23", "t24", "t26"]},
+ {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15",
+ "t22", "t23", "t24", "t26"]},
+ {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22",
+ "t23", "t24", "t26"]},
+ {"", ["t6", "t17", "t24"]},
+ {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]},
+ {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]},
+ {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]},
+ {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]},
+ {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23",
+ "t24", "t26"]},
+ {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]},
+ {"oneword", ["t6", "t22", "t23", "t24", "t25"]},
+ {"args-test", ["t6", "t22", "t23", "t24", "t25", "t27"]}]),
+
+ %% remove the entire exchange
+ exchange_op_callback(X, delete, [RemainingBindings]),
+ %% none should match now
+ test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]),
+ passed.
+
+exchange_op_callback(X, Fun, Args) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () -> rabbit_exchange:callback(X, Fun, transaction, [X] ++ Args) end),
+ rabbit_exchange:callback(X, Fun, none, [X] ++ Args).
+
+test_topic_expect_match(X, List) ->
+ lists:foreach(
+ fun ({Key, Expected}) ->
+ BinKey = list_to_binary(Key),
+ Message = rabbit_basic:message(X#exchange.name, BinKey,
+ #'P_basic'{}, <<>>),
+ Res = rabbit_exchange_type_topic:route(
+ X, #delivery{mandatory = false,
+ sender = self(),
+ message = Message}),
+ ExpectedRes = lists:map(
+ fun (Q) -> #resource{virtual_host = <<"/">>,
+ kind = queue,
+ name = list_to_binary(Q)}
+ end, Expected),
+ true = (lists:usort(ExpectedRes) =:= lists:usort(Res))
+ end, List).
diff --git a/deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl b/deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl
new file mode 100644
index 0000000000..9cb1ad7267
--- /dev/null
+++ b/deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl
@@ -0,0 +1,127 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_access_control_authn_authz_context_propagation_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ propagate_context_to_auth_backend
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ AuthConfig = {rabbit, [
+ {auth_backends, [rabbit_auth_backend_context_propagation_mock]}
+ ]
+ },
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun(Conf) -> merge_app_env(AuthConfig, Conf) end ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+merge_app_env(SomeConfig, Config) ->
+ rabbit_ct_helpers:merge_app_env(Config, SomeConfig).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+propagate_context_to_auth_backend(Config) ->
+ ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config,
+ rabbit_auth_backend_context_propagation_mock),
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, propagate_context_to_auth_backend1, []).
+
+propagate_context_to_auth_backend1() ->
+ rabbit_auth_backend_context_propagation_mock:init(),
+ AmqpParams = #amqp_params_direct{
+ virtual_host = <<"/">>,
+ username = <<"guest">>,
+ password = <<"guest">>,
+ adapter_info = #amqp_adapter_info{additional_info = [
+ {variable_map, #{<<"key1">> => <<"value1">>}}
+ ],
+ protocol = {'FOO_PROTOCOL', '1.0'} %% this will trigger a call to rabbit_foo_protocol_connection_info
+ }
+ },
+ {ok, Conn} = amqp_connection:start(AmqpParams),
+
+ %% rabbit_direct will call the rabbit_foo_protocol_connection_info module to extract information
+ %% this information will be propagated to the authentication backend
+ [{authentication, AuthProps}] = rabbit_auth_backend_context_propagation_mock:get(authentication),
+ ?assertEqual(<<"value1">>, proplists:get_value(key1, AuthProps)),
+
+ %% variable_map is propagated from rabbit_direct to the authorization backend
+ [{vhost_access, AuthzData}] = rabbit_auth_backend_context_propagation_mock:get(vhost_access),
+ ?assertEqual(<<"value1">>, maps:get(<<"key1">>, AuthzData)),
+
+ %% variable_map is extracted when the channel is created and kept in its state
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QName = <<"channel_propagate_context_to_authz_backend-q">>,
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName}),
+
+ check_send_receive(Ch, <<"">>, QName, QName),
+ amqp_channel:call(Ch, #'queue.bind'{queue = QName, exchange = <<"amq.topic">>, routing_key = <<"a.b">>}),
+ %% variable_map content is propagated from rabbit_channel to the authorization backend (resource check)
+ [{resource_access, AuthzContext}] = rabbit_auth_backend_context_propagation_mock:get(resource_access),
+ ?assertEqual(<<"value1">>, maps:get(<<"key1">>, AuthzContext)),
+
+ check_send_receive(Ch, <<"amq.topic">>, <<"a.b">>, QName),
+ %% variable_map is propagated from rabbit_channel to the authorization backend (topic check)
+ [{topic_access, TopicContext}] = rabbit_auth_backend_context_propagation_mock:get(topic_access),
+ VariableMap = maps:get(variable_map, TopicContext),
+ ?assertEqual(<<"value1">>, maps:get(<<"key1">>, VariableMap)),
+
+ passed.
+
+check_send_receive(Ch, Exchange, RoutingKey, QName) ->
+ amqp_channel:call(Ch,
+ #'basic.publish'{exchange = Exchange, routing_key = RoutingKey},
+ #amqp_msg{payload = <<"foo">>}),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = <<"foo">>}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName,
+ no_ack = true}).
diff --git a/deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl b/deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl
new file mode 100644
index 0000000000..6a6a07836c
--- /dev/null
+++ b/deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl
@@ -0,0 +1,269 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_access_control_credential_validation_SUITE).
+
+-compile(export_all).
+-include_lib("proper/include/proper.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ {group, unit},
+ {group, integration}
+ ].
+
+groups() ->
+ [
+ {integration, [], [
+ min_length_integration_fails
+ , regexp_integration_fails
+ , min_length_integration_succeeds
+ , regexp_integration_succeeds
+ , min_length_change_password_integration_fails
+ , regexp_change_password_integration_fails
+ , min_length_change_password_integration_succeeds
+ , regexp_change_password_integration_succeeds
+ ]},
+ {unit, [parallel], [
+ basic_unconditionally_accepting_succeeds,
+ min_length_fails,
+ min_length_succeeds,
+ min_length_proper_fails,
+ min_length_proper_succeeds,
+ regexp_fails,
+ regexp_succeeds,
+ regexp_proper_fails,
+ regexp_proper_succeeds
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 4}}
+ ].
+
+%%
+%% Setup/teardown
+%%
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(integration, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 1},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps());
+
+init_per_group(unit, Config) ->
+ Config.
+
+end_per_group(integration, Config) ->
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps());
+end_per_group(unit, Config) ->
+ Config.
+
+-define(USERNAME, <<"abc">>).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%%
+%% Test Cases
+%%
+
+basic_unconditionally_accepting_succeeds(_Config) ->
+ F = fun rabbit_credential_validator_accept_everything:validate/2,
+
+ Pwd1 = crypto:strong_rand_bytes(1),
+ ?assertEqual(ok, F(?USERNAME, Pwd1)),
+ Pwd2 = crypto:strong_rand_bytes(5),
+ ?assertEqual(ok, F(?USERNAME, Pwd2)),
+ Pwd3 = crypto:strong_rand_bytes(10),
+ ?assertEqual(ok, F(?USERNAME, Pwd3)),
+ Pwd4 = crypto:strong_rand_bytes(50),
+ ?assertEqual(ok, F(?USERNAME, Pwd4)),
+ Pwd5 = crypto:strong_rand_bytes(100),
+ ?assertEqual(ok, F(?USERNAME, Pwd5)),
+ Pwd6 = crypto:strong_rand_bytes(1000),
+ ?assertEqual(ok, F(?USERNAME, Pwd6)).
+
+min_length_fails(_Config) ->
+ F = fun rabbit_credential_validator_min_password_length:validate/3,
+
+ Pwd1 = crypto:strong_rand_bytes(1),
+ ?assertMatch({error, _}, F(?USERNAME, Pwd1, 5)),
+ Pwd2 = crypto:strong_rand_bytes(5),
+ ?assertMatch({error, _}, F(?USERNAME, Pwd2, 6)),
+ Pwd3 = crypto:strong_rand_bytes(10),
+ ?assertMatch({error, _}, F(?USERNAME, Pwd3, 15)),
+ Pwd4 = crypto:strong_rand_bytes(50),
+ ?assertMatch({error, _}, F(?USERNAME, Pwd4, 60)),
+ Pwd5 = undefined,
+ ?assertMatch({error, _}, F(?USERNAME, Pwd5, 60)),
+ Pwd6 = <<"">>,
+ ?assertMatch({error, _}, F(?USERNAME, Pwd6, 60)).
+
+min_length_succeeds(_Config) ->
+ F = fun rabbit_credential_validator_min_password_length:validate/3,
+
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(1), 1)),
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(6), 6)),
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(7), 6)),
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(20), 20)),
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(40), 30)),
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(50), 50)).
+
+min_length_proper_fails(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_min_length_fails_validation/0, [], 500).
+
+min_length_proper_succeeds(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_min_length_passes_validation/0, [], 500).
+
+regexp_fails(_Config) ->
+ F = fun rabbit_credential_validator_password_regexp:validate/3,
+
+ ?assertMatch({error, _}, F(?USERNAME, <<"abc">>, "^xyz")),
+ ?assertMatch({error, _}, F(?USERNAME, <<"abcdef">>, "^xyz")),
+ ?assertMatch({error, _}, F(?USERNAME, <<"abcxyz">>, "^abc\\d+")).
+
+regexp_succeeds(_Config) ->
+ F = fun rabbit_credential_validator_password_regexp:validate/3,
+
+ ?assertEqual(ok, F(?USERNAME, <<"abc">>, "^abc")),
+ ?assertEqual(ok, F(?USERNAME, <<"abcdef">>, "^abc")),
+ ?assertEqual(ok, F(?USERNAME, <<"abc123">>, "^abc\\d+")).
+
+regexp_proper_fails(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_regexp_fails_validation/0, [], 500).
+
+regexp_proper_succeeds(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_regexp_passes_validation/0, [], 500).
+
+min_length_integration_fails(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, min_length, 50),
+ ?assertMatch(rabbit_credential_validator_min_password_length, validator_backend(Config)),
+ ?assertMatch({error, "minimum required password length is 50"},
+ rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"_">>)).
+
+regexp_integration_fails(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, regexp),
+ ?assertMatch(rabbit_credential_validator_password_regexp, validator_backend(Config)),
+ ?assertMatch({error, _}, rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"_">>)).
+
+min_length_integration_succeeds(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, min_length, 5),
+ ?assertMatch(rabbit_credential_validator_min_password_length, validator_backend(Config)),
+ ?assertMatch(ok, rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"abcdefghi">>)).
+
+regexp_integration_succeeds(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, regexp),
+ ?assertMatch(rabbit_credential_validator_password_regexp, validator_backend(Config)),
+ ?assertMatch(ok, rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"xyz12345678901">>)).
+
+min_length_change_password_integration_fails(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"abcdefghi">>),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, min_length, 50),
+ ?assertMatch(rabbit_credential_validator_min_password_length, validator_backend(Config)),
+ ?assertMatch({error, "minimum required password length is 50"},
+ rabbit_ct_broker_helpers:change_password(Config, ?USERNAME, <<"_">>)).
+
+regexp_change_password_integration_fails(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"abcdefghi">>),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, regexp),
+ ?assertMatch(rabbit_credential_validator_password_regexp, validator_backend(Config)),
+ ?assertMatch({error, _}, rabbit_ct_broker_helpers:change_password(Config, ?USERNAME, <<"_">>)).
+
+min_length_change_password_integration_succeeds(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"abcdefghi">>),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, min_length, 5),
+ ?assertMatch(rabbit_credential_validator_min_password_length, validator_backend(Config)),
+ ?assertMatch(ok, rabbit_ct_broker_helpers:change_password(Config, ?USERNAME, <<"abcdefghi">>)).
+
+regexp_change_password_integration_succeeds(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"abcdefghi">>),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, regexp),
+ ?assertMatch(rabbit_credential_validator_password_regexp, validator_backend(Config)),
+ ?assertMatch(ok, rabbit_ct_broker_helpers:change_password(Config, ?USERNAME, <<"xyz12345678901">>)).
+
+%%
+%% PropEr
+%%
+
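+%% The properties below generate random passwords and assert that validators
+%% accept or reject them based purely on length: a password of length N must
+%% fail any minimum-length (or length-enforcing regexp) requirement strictly
+%% greater than N, and pass one no greater than its length. For an ad-hoc run
+%% outside Common Test, a property can also be checked directly, e.g.
+%% (illustrative shell call, not part of the suite):
+%%   proper:quickcheck(prop_min_length_fails_validation(), 500).
+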
+prop_min_length_fails_validation() ->
+ N = 5,
+ F = fun rabbit_credential_validator_min_password_length:validate/3,
+ ?FORALL(Val, binary(N),
+ ?FORALL(Length, choose(N + 1, 100),
+ failed_validation(F(?USERNAME, Val, Length + 1)))).
+
+prop_min_length_passes_validation() ->
+ N = 20,
+ F = fun rabbit_credential_validator_min_password_length:validate/3,
+ ?FORALL(Val, binary(N),
+ ?FORALL(Length, choose(1, N - 1),
+ passed_validation(F(?USERNAME, Val, Length)))).
+
+prop_regexp_fails_validation() ->
+ N = 5,
+ F = fun rabbit_credential_validator_password_regexp:validate/3,
+ ?FORALL(Val, binary(N),
+ ?FORALL(Length, choose(N + 1, 100),
+ failed_validation(F(?USERNAME, Val, regexp_that_requires_length_of_at_least(Length + 1))))).
+
+prop_regexp_passes_validation() ->
+ N = 5,
+ F = fun rabbit_credential_validator_password_regexp:validate/3,
+ ?FORALL(Val, binary(N),
+ passed_validation(F(?USERNAME, Val, regexp_that_requires_length_of_at_most(size(Val) + 1)))).
+
+%%
+%% Helpers
+%%
+
+passed_validation(ok) ->
+ true;
+passed_validation({error, _}) ->
+ false.
+
+failed_validation(Result) ->
+ not passed_validation(Result).
+
+regexp_that_requires_length_of_at_least(N) when is_integer(N) ->
+ rabbit_misc:format("^[a-zA-Z0-9]{~p,~p}", [N, N + 10]).
+
+regexp_that_requires_length_of_at_most(N) when is_integer(N) ->
+ rabbit_misc:format("^[a-zA-Z0-9]{0,~p}", [N]).
+
+validator_backend(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_credential_validation, backend, []).
diff --git a/deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl b/deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl
new file mode 100644
index 0000000000..d483dbdd06
--- /dev/null
+++ b/deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl
@@ -0,0 +1,231 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_amqp091_content_framing_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ write_table_with_invalid_existing_type,
+ invalid_existing_headers,
+ disparate_invalid_header_entries_accumulate_separately,
+ corrupt_or_invalid_headers_are_overwritten,
+ invalid_same_header_entry_accumulation,
+ content_framing,
+ content_transcoding,
+ table_codec
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+-define(XDEATH_TABLE,
+ [{<<"reason">>, longstr, <<"blah">>},
+ {<<"queue">>, longstr, <<"foo.bar.baz">>},
+ {<<"exchange">>, longstr, <<"my-exchange">>},
+ {<<"routing-keys">>, array, []}]).
+
+-define(ROUTE_TABLE, [{<<"redelivered">>, bool, <<"true">>}]).
+
+-define(BAD_HEADER(K), {<<K>>, longstr, <<"bad ", K>>}).
+-define(BAD_HEADER2(K, Suf), {<<K>>, longstr, <<"bad ", K, Suf>>}).
+-define(FOUND_BAD_HEADER(K), {<<K>>, array, [{longstr, <<"bad ", K>>}]}).
+
+write_table_with_invalid_existing_type(_Config) ->
+ prepend_check(<<"header1">>, ?XDEATH_TABLE, [?BAD_HEADER("header1")]).
+
+invalid_existing_headers(_Config) ->
+ Headers =
+ prepend_check(<<"header2">>, ?ROUTE_TABLE, [?BAD_HEADER("header2")]),
+ {array, [{table, ?ROUTE_TABLE}]} =
+ rabbit_misc:table_lookup(Headers, <<"header2">>),
+ passed.
+
+disparate_invalid_header_entries_accumulate_separately(_Config) ->
+ BadHeaders = [?BAD_HEADER("header2")],
+ Headers = prepend_check(<<"header2">>, ?ROUTE_TABLE, BadHeaders),
+ Headers2 = prepend_check(<<"header1">>, ?XDEATH_TABLE,
+ [?BAD_HEADER("header1") | Headers]),
+ {table, [?FOUND_BAD_HEADER("header1"),
+ ?FOUND_BAD_HEADER("header2")]} =
+ rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
+ passed.
+
+corrupt_or_invalid_headers_are_overwritten(_Config) ->
+ Headers0 = [?BAD_HEADER("header1"),
+ ?BAD_HEADER("x-invalid-headers")],
+ Headers1 = prepend_check(<<"header1">>, ?XDEATH_TABLE, Headers0),
+ {table,[?FOUND_BAD_HEADER("header1"),
+ ?FOUND_BAD_HEADER("x-invalid-headers")]} =
+ rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
+ passed.
+
+invalid_same_header_entry_accumulation(_Config) ->
+ BadHeader1 = ?BAD_HEADER2("header1", "a"),
+ Headers = prepend_check(<<"header1">>, ?ROUTE_TABLE, [BadHeader1]),
+ Headers2 = prepend_check(<<"header1">>, ?ROUTE_TABLE,
+ [?BAD_HEADER2("header1", "b") | Headers]),
+ {table, InvalidHeaders} =
+ rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
+ {array, [{longstr,<<"bad header1b">>},
+ {longstr,<<"bad header1a">>}]} =
+ rabbit_misc:table_lookup(InvalidHeaders, <<"header1">>),
+ passed.
+
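+%% Prepends HeaderTable under HeaderKey and asserts that the pre-existing
+%% (invalid) value for that key is recorded under ?INVALID_HEADERS_KEY;
+%% returns the updated headers.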
+prepend_check(HeaderKey, HeaderTable, Headers) ->
+ Headers1 = rabbit_basic:prepend_table_header(
+ HeaderKey, HeaderTable, Headers),
+ {table, Invalid} =
+ rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
+ {Type, Value} = rabbit_misc:table_lookup(Headers, HeaderKey),
+ {array, [{Type, Value} | _]} =
+ rabbit_misc:table_lookup(Invalid, HeaderKey),
+ Headers1.
+
+
+%% Test that content frames don't exceed frame-max
+content_framing(_Config) ->
+ %% no content
+ passed = test_content_framing(4096, <<>>),
+ %% easily fit in one frame
+ passed = test_content_framing(4096, <<"Easy">>),
+ %% exactly one frame (empty frame = 8 bytes)
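+ %% (an AMQP 0-9-1 frame is a 7-byte header plus payload plus a 1-byte
+ %% frame-end marker, so a frame-max of 11 leaves exactly 3 bytes of payload)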
+ passed = test_content_framing(11, <<"One">>),
+ %% more than one frame
+ passed = test_content_framing(11, <<"More than one frame">>),
+ passed.
+
+test_content_framing(FrameMax, BodyBin) ->
+ [Header | Frames] =
+ rabbit_binary_generator:build_simple_content_frames(
+ 1,
+ rabbit_binary_generator:ensure_content_encoded(
+ rabbit_basic:build_content(#'P_basic'{}, BodyBin),
+ rabbit_framing_amqp_0_9_1),
+ FrameMax,
+ rabbit_framing_amqp_0_9_1),
+ %% header is formatted correctly and the size is the total of the
+ %% fragments
+ <<_FrameHeader:7/binary, _ClassAndWeight:4/binary,
+ BodySize:64/unsigned, _Rest/binary>> = list_to_binary(Header),
+ BodySize = size(BodyBin),
+ true = lists:all(
+ fun (ContentFrame) ->
+ FrameBinary = list_to_binary(ContentFrame),
+ %% assert
+ <<_TypeAndChannel:3/binary,
+ Size:32/unsigned, _Payload:Size/binary, 16#CE>> =
+ FrameBinary,
+ size(FrameBinary) =< FrameMax
+ end, Frames),
+ passed.
+
+content_transcoding(_Config) ->
+ %% there are no guarantees provided by 'clear' - it's just a hint
+ ClearDecoded = fun rabbit_binary_parser:clear_decoded_content/1,
+ ClearEncoded = fun rabbit_binary_generator:clear_encoded_content/1,
+ EnsureDecoded =
+ fun (C0) ->
+ C1 = rabbit_binary_parser:ensure_content_decoded(C0),
+ true = C1#content.properties =/= none,
+ C1
+ end,
+ EnsureEncoded =
+ fun (Protocol) ->
+ fun (C0) ->
+ C1 = rabbit_binary_generator:ensure_content_encoded(
+ C0, Protocol),
+ true = C1#content.properties_bin =/= none,
+ C1
+ end
+ end,
+ %% Beyond the assertions in Ensure*, the only testable guarantee
+ %% is that the operations should never fail.
+ %%
+ %% If we were using quickcheck we'd simply stuff all the above
+ %% into a generator for sequences of operations. In the absence of
+ %% quickcheck we pick particularly interesting sequences that:
+ %%
+ %% - execute every op twice since they are idempotent
+ %% - invoke clear_decoded, clear_encoded, decode and transcode
+ %% with one or both of decoded and encoded content present
+ [begin
+ sequence_with_content([Op]),
+ sequence_with_content([ClearEncoded, Op]),
+ sequence_with_content([ClearDecoded, Op])
+ end || Op <- [ClearDecoded, ClearEncoded, EnsureDecoded,
+ EnsureEncoded(rabbit_framing_amqp_0_9_1),
+ EnsureEncoded(rabbit_framing_amqp_0_8)]],
+ passed.
+
+sequence_with_content(Sequence) ->
+ lists:foldl(fun (F, V) -> F(F(V)) end,
+ rabbit_binary_generator:ensure_content_encoded(
+ rabbit_basic:build_content(#'P_basic'{}, <<>>),
+ rabbit_framing_amqp_0_9_1),
+ Sequence).
+
+table_codec(_Config) ->
+ %% Note: this does not test inexact numbers (double and float) at the moment.
+ %% They won't pass the equality assertions.
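+ %% Each field-table entry is encoded as a length-prefixed key, a one-octet
+ %% type tag (e.g. "S" for long string, "I" for signed int) and the value;
+ %% Binary below spells this layout out by hand so the codec can be checked
+ %% in both directions.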
+ Table = [{<<"longstr">>, longstr, <<"Here is a long string">>},
+ {<<"signedint">>, signedint, 12345},
+ {<<"decimal">>, decimal, {3, 123456}},
+ {<<"timestamp">>, timestamp, 109876543209876},
+ {<<"table">>, table, [{<<"one">>, signedint, 54321},
+ {<<"two">>, longstr,
+ <<"A long string">>}]},
+ {<<"byte">>, byte, -128},
+ {<<"long">>, long, 1234567890},
+ {<<"short">>, short, 655},
+ {<<"bool">>, bool, true},
+ {<<"binary">>, binary, <<"a binary string">>},
+ {<<"unsignedbyte">>, unsignedbyte, 250},
+ {<<"unsignedshort">>, unsignedshort, 65530},
+ {<<"unsignedint">>, unsignedint, 4294967290},
+ {<<"void">>, void, undefined},
+ {<<"array">>, array, [{signedint, 54321},
+ {longstr, <<"A long string">>}]}
+ ],
+ Binary = <<
+ 7,"longstr", "S", 21:32, "Here is a long string",
+ 9,"signedint", "I", 12345:32/signed,
+ 7,"decimal", "D", 3, 123456:32,
+ 9,"timestamp", "T", 109876543209876:64,
+ 5,"table", "F", 31:32, % length of table
+ 3,"one", "I", 54321:32,
+ 3,"two", "S", 13:32, "A long string",
+ 4,"byte", "b", -128:8/signed,
+ 4,"long", "l", 1234567890:64,
+ 5,"short", "s", 655:16,
+ 4,"bool", "t", 1,
+ 6,"binary", "x", 15:32, "a binary string",
+ 12,"unsignedbyte", "B", 250:8/unsigned,
+ 13,"unsignedshort", "u", 65530:16/unsigned,
+ 11,"unsignedint", "i", 4294967290:32/unsigned,
+ 4,"void", "V",
+ 5,"array", "A", 23:32,
+ "I", 54321:32,
+ "S", 13:32, "A long string"
+ >>,
+ Binary = rabbit_binary_generator:generate_table(Table),
+ Table = rabbit_binary_parser:parse_table(Binary),
+ passed.
diff --git a/deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl b/deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl
new file mode 100644
index 0000000000..036fb8ce28
--- /dev/null
+++ b/deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl
@@ -0,0 +1,144 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_amqp091_server_properties_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT_LIST_OPS_PASS, 5000).
+-define(TIMEOUT, 30000).
+-define(TIMEOUT_CHANNEL_EXCEPTION, 5000).
+
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ configurable_server_properties
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 3}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 2,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ rabbit_ct_helpers:run_steps(Config, [])
+ end.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, Q}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+configurable_server_properties(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, configurable_server_properties1, [Config]).
+
+configurable_server_properties1(_Config) ->
+ %% List of the names of the built-in properties we expect to find
+ BuiltInPropNames = [<<"product">>, <<"version">>, <<"platform">>,
+ <<"copyright">>, <<"information">>],
+
+ Protocol = rabbit_framing_amqp_0_9_1,
+
+ %% Verify that the built-in properties are initially present
+ ActualPropNames = [Key || {Key, longstr, _} <-
+ rabbit_reader:server_properties(Protocol)],
+ true = lists:all(fun (X) -> lists:member(X, ActualPropNames) end,
+ BuiltInPropNames),
+
+ %% Get the initial server properties configured in the environment
+ {ok, ServerProperties} = application:get_env(rabbit, server_properties),
+
+ %% Helper functions
+ ConsProp = fun (X) -> application:set_env(rabbit,
+ server_properties,
+ [X | ServerProperties]) end,
+ IsPropPresent =
+ fun (X) ->
+ lists:member(X, rabbit_reader:server_properties(Protocol))
+ end,
+
+ %% Add a wholly new property of the simplified {KeyAtom, StringValue} form
+ NewSimplifiedProperty = {NewHareKey, NewHareVal} = {hare, "soup"},
+ ConsProp(NewSimplifiedProperty),
+ %% Do we find hare soup, appropriately formatted in the generated properties?
+ ExpectedHareImage = {list_to_binary(atom_to_list(NewHareKey)),
+ longstr,
+ list_to_binary(NewHareVal)},
+ true = IsPropPresent(ExpectedHareImage),
+
+ %% Add a wholly new property of the {BinaryKey, Type, Value} form
+ %% and check for it
+ NewProperty = {<<"new-bin-key">>, signedint, -1},
+ ConsProp(NewProperty),
+ %% Do we find the new property?
+ true = IsPropPresent(NewProperty),
+
+ %% Add a property that clobbers a built-in, and verify correct clobbering
+ {NewVerKey, NewVerVal} = NewVersion = {version, "X.Y.Z."},
+ {BinNewVerKey, BinNewVerVal} = {list_to_binary(atom_to_list(NewVerKey)),
+ list_to_binary(NewVerVal)},
+ ConsProp(NewVersion),
+ ClobberedServerProps = rabbit_reader:server_properties(Protocol),
+ %% Is the clobbering insert present?
+ true = IsPropPresent({BinNewVerKey, longstr, BinNewVerVal}),
+ %% Is the clobbering insert the only thing with the clobbering key?
+ [{BinNewVerKey, longstr, BinNewVerVal}] =
+ [E || {K, longstr, _V} = E <- ClobberedServerProps, K =:= BinNewVerKey],
+
+ application:set_env(rabbit, server_properties, ServerProperties),
+ passed.
diff --git a/deps/rabbit/test/unit_app_management_SUITE.erl b/deps/rabbit/test/unit_app_management_SUITE.erl
new file mode 100644
index 0000000000..e08f151d57
--- /dev/null
+++ b/deps/rabbit/test/unit_app_management_SUITE.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_app_management_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ app_management
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Application management.
+%% -------------------------------------------------------------------
+
+app_management(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, app_management1, [Config]).
+
+app_management1(_Config) ->
+ wait_for_application(rabbit),
+ %% Starting, stopping and diagnostics. Note that we don't try
+ %% 'report' when the rabbit app is stopped and that we enable
+ %% tracing for the duration of this function.
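+ %% stop/0 and start/0 are each called twice in a row; the second call checks
+ %% that they still return ok when the app is already stopped or already
+ %% running.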
+ ok = rabbit_trace:start(<<"/">>),
+ ok = rabbit:stop(),
+ ok = rabbit:stop(),
+ ok = no_exceptions(rabbit, status, []),
+ ok = no_exceptions(rabbit, environment, []),
+ ok = rabbit:start(),
+ ok = rabbit:start(),
+ ok = no_exceptions(rabbit, status, []),
+ ok = no_exceptions(rabbit, environment, []),
+ ok = rabbit_trace:stop(<<"/">>),
+ passed.
+
+no_exceptions(Mod, Fun, Args) ->
+ try erlang:apply(Mod, Fun, Args) of _ -> ok
+ catch Type:Ex -> {Type, Ex}
+ end.
+
+wait_for_application(Application) ->
+ wait_for_application(Application, 5000).
+
+wait_for_application(_, Time) when Time =< 0 ->
+ {error, timeout};
+wait_for_application(Application, Time) ->
+ Interval = 100,
+ case lists:keyfind(Application, 1, application:which_applications()) of
+ false ->
+ timer:sleep(Interval),
+ wait_for_application(Application, Time - Interval);
+ _ -> ok
+ end.
diff --git a/deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl b/deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl
new file mode 100644
index 0000000000..41dd685694
--- /dev/null
+++ b/deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl
@@ -0,0 +1,71 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(unit_cluster_formation_locking_mocks_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ init_with_lock_exits_after_errors,
+ init_with_lock_ignore_after_errors,
+ init_with_lock_not_supported,
+ init_with_lock_supported
+ ]}
+ ].
+
+init_per_testcase(Testcase, Config) when Testcase == init_with_lock_exits_after_errors;
+ Testcase == init_with_lock_not_supported;
+ Testcase == init_with_lock_supported ->
+ application:set_env(rabbit, cluster_formation,
+ [{peer_discover_backend, peer_discover_classic_config},
+ {lock_acquisition_failure_mode, fail}]),
+ ok = meck:new(rabbit_peer_discovery_classic_config, [passthrough]),
+ Config;
+init_per_testcase(init_with_lock_ignore_after_errors, Config) ->
+ application:set_env(rabbit, cluster_formation,
+ [{peer_discover_backend, peer_discover_classic_config},
+ {lock_acquisition_failure_mode, ignore}]),
+ ok = meck:new(rabbit_peer_discovery_classic_config, [passthrough]),
+ Config.
+
+end_per_testcase(_, _) ->
+ meck:unload(),
+ application:unset_env(rabbit, cluster_formation).
+
+init_with_lock_exits_after_errors(_Config) ->
+ meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> {error, "test error"} end),
+ ?assertExit(cannot_acquire_startup_lock, rabbit_mnesia:init_with_lock(2, 10, fun() -> ok end)),
+ ?assert(meck:validate(rabbit_peer_discovery_classic_config)),
+ passed.
+
+init_with_lock_ignore_after_errors(_Config) ->
+ meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> {error, "test error"} end),
+ ?assertEqual(ok, rabbit_mnesia:init_with_lock(2, 10, fun() -> ok end)),
+ ?assert(meck:validate(rabbit_peer_discovery_classic_config)),
+ passed.
+
+init_with_lock_not_supported(_Config) ->
+ meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> not_supported end),
+ ?assertEqual(ok, rabbit_mnesia:init_with_lock(2, 10, fun() -> ok end)),
+ ?assert(meck:validate(rabbit_peer_discovery_classic_config)),
+ passed.
+
+init_with_lock_supported(_Config) ->
+ meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> {ok, data} end),
+ meck:expect(rabbit_peer_discovery_classic_config, unlock, fun(data) -> ok end),
+ ?assertEqual(ok, rabbit_mnesia:init_with_lock(2, 10, fun() -> ok end)),
+ ?assert(meck:validate(rabbit_peer_discovery_classic_config)),
+ passed.
diff --git a/deps/rabbit/test/unit_collections_SUITE.erl b/deps/rabbit/test/unit_collections_SUITE.erl
new file mode 100644
index 0000000000..1cbf65efce
--- /dev/null
+++ b/deps/rabbit/test/unit_collections_SUITE.erl
@@ -0,0 +1,51 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_collections_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ pmerge,
+ plmerge,
+ unfold
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+pmerge(_Config) ->
+ P = [{a, 1}, {b, 2}],
+ P = rabbit_misc:pmerge(a, 3, P),
+ [{c, 3} | P] = rabbit_misc:pmerge(c, 3, P),
+ passed.
+
+plmerge(_Config) ->
+ P1 = [{a, 1}, {b, 2}, {c, 3}],
+ P2 = [{a, 2}, {d, 4}],
+ [{a, 1}, {b, 2}, {c, 3}, {d, 4}] = rabbit_misc:plmerge(P1, P2),
+ passed.
+
+unfold(_Config) ->
+ {[], test} = rabbit_misc:unfold(fun (_V) -> false end, test),
+ List = lists:seq(2,20,2),
+ {List, 0} = rabbit_misc:unfold(fun (0) -> false;
+ (N) -> {true, N*2, N-1}
+ end, 10),
+ passed.
diff --git a/deps/rabbit/test/unit_config_value_encryption_SUITE.erl b/deps/rabbit/test/unit_config_value_encryption_SUITE.erl
new file mode 100644
index 0000000000..7536005797
--- /dev/null
+++ b/deps/rabbit/test/unit_config_value_encryption_SUITE.erl
@@ -0,0 +1,233 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_config_value_encryption_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ decrypt_start_app,
+ decrypt_start_app_file,
+ decrypt_start_app_undefined,
+ decrypt_start_app_wrong_passphrase,
+ decrypt_config,
+ rabbitmqctl_encode
+ ]}
+ ].
+
+init_per_testcase(TC, Config) when TC =:= decrypt_start_app;
+ TC =:= decrypt_start_app_file;
+ TC =:= decrypt_start_app_undefined;
+ TC =:= decrypt_start_app_wrong_passphrase ->
+ application:set_env(rabbit, feature_flags_file, "", [{persistent, true}]),
+ Config;
+init_per_testcase(_Testcase, Config) ->
+ Config.
+
+end_per_testcase(_TC, _Config) ->
+ ok.
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+decrypt_config(_Config) ->
+ %% Take all available block ciphers.
+ Hashes = rabbit_pbe:supported_hashes(),
+ Ciphers = rabbit_pbe:supported_ciphers(),
+ Iterations = [1, 10, 100, 1000],
+ %% Loop through all hashes, ciphers and iterations.
+ _ = [begin
+ PassPhrase = crypto:strong_rand_bytes(16),
+ do_decrypt_config({C, H, I, PassPhrase})
+ end || H <- Hashes, C <- Ciphers, I <- Iterations],
+ ok.
+
+do_decrypt_config(Algo = {C, H, I, P}) ->
+ ok = application:load(rabbit),
+ RabbitConfig = application:get_all_env(rabbit),
+ %% Encrypt a few values in configuration.
+ %% Common cases.
+ _ = [encrypt_value(Key, Algo) || Key <- [
+ tcp_listeners,
+ num_tcp_acceptors,
+ ssl_options,
+ vm_memory_high_watermark,
+ default_pass,
+ default_permissions,
+ cluster_nodes,
+ auth_mechanisms,
+ msg_store_credit_disc_bound]],
+ %% Special case: encrypt a value in a list.
+ {ok, [LoopbackUser]} = application:get_env(rabbit, loopback_users),
+ {encrypted, EncLoopbackUser} = rabbit_pbe:encrypt_term(C, H, I, P, LoopbackUser),
+ application:set_env(rabbit, loopback_users, [{encrypted, EncLoopbackUser}]),
+ %% Special case: encrypt a value in a key/value list.
+ {ok, TCPOpts} = application:get_env(rabbit, tcp_listen_options),
+ {_, Backlog} = lists:keyfind(backlog, 1, TCPOpts),
+ {_, Linger} = lists:keyfind(linger, 1, TCPOpts),
+ {encrypted, EncBacklog} = rabbit_pbe:encrypt_term(C, H, I, P, Backlog),
+ {encrypted, EncLinger} = rabbit_pbe:encrypt_term(C, H, I, P, Linger),
+ TCPOpts1 = lists:keyreplace(backlog, 1, TCPOpts, {backlog, {encrypted, EncBacklog}}),
+ TCPOpts2 = lists:keyreplace(linger, 1, TCPOpts1, {linger, {encrypted, EncLinger}}),
+ application:set_env(rabbit, tcp_listen_options, TCPOpts2),
+ %% Decrypt configuration.
+ rabbit_prelaunch_conf:decrypt_config([rabbit], Algo),
+ %% Check that configuration was decrypted properly.
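+ %% (RabbitConfig is already bound, so this match asserts that the decrypted
+ %% environment is identical to the original, unencrypted values.)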
+ RabbitConfig = application:get_all_env(rabbit),
+ ok = application:unload(rabbit),
+ ok.
+
+encrypt_value(Key, {C, H, I, P}) ->
+ {ok, Value} = application:get_env(rabbit, Key),
+ {encrypted, EncValue} = rabbit_pbe:encrypt_term(C, H, I, P, Value),
+ application:set_env(rabbit, Key, {encrypted, EncValue}).
+
+decrypt_start_app(Config) ->
+ do_decrypt_start_app(Config, "hello").
+
+decrypt_start_app_file(Config) ->
+ do_decrypt_start_app(Config, {file, ?config(data_dir, Config) ++ "/rabbit_shovel_test.passphrase"}).
+
+do_decrypt_start_app(Config, Passphrase) ->
+ %% Configure rabbit for decrypting configuration.
+ application:set_env(rabbit, config_entry_decoder, [
+ {cipher, aes_cbc256},
+ {hash, sha512},
+ {iterations, 1000},
+ {passphrase, Passphrase}
+ ], [{persistent, true}]),
+ %% Add the path to our test application.
+ code:add_path(?config(data_dir, Config) ++ "/lib/rabbit_shovel_test/ebin"),
+ %% Attempt to start our test application.
+ %%
+ %% We expect a failure *after* the decrypting has been done.
+ try
+ rabbit:start_apps([rabbit_shovel_test], #{rabbit => temporary})
+ catch _:_ ->
+ ok
+ end,
+ %% Check if the values have been decrypted.
+ {ok, Shovels} = application:get_env(rabbit_shovel_test, shovels),
+ {_, FirstShovel} = lists:keyfind(my_first_shovel, 1, Shovels),
+ {_, Sources} = lists:keyfind(sources, 1, FirstShovel),
+ {_, Brokers} = lists:keyfind(brokers, 1, Sources),
+ ["amqp://fred:secret@host1.domain/my_vhost",
+ "amqp://john:secret@host2.domain/my_vhost"] = Brokers,
+ ok.
+
+decrypt_start_app_undefined(Config) ->
+ %% Configure rabbit for decrypting configuration.
+ application:set_env(rabbit, config_entry_decoder, [
+ {cipher, aes_cbc256},
+ {hash, sha512},
+ {iterations, 1000}
+ %% No passphrase option!
+ ], [{persistent, true}]),
+ %% Add the path to our test application.
+ code:add_path(?config(data_dir, Config) ++ "/lib/rabbit_shovel_test/ebin"),
+ %% Attempt to start our test application.
+ %%
+ %% We expect a failure during decryption because the passphrase is missing.
+ try
+ rabbit:start_apps([rabbit_shovel_test], #{rabbit => temporary})
+ catch
+ throw:{bad_config_entry_decoder, missing_passphrase} -> ok;
+ _:Exception -> exit({unexpected_exception, Exception})
+ end.
+
+decrypt_start_app_wrong_passphrase(Config) ->
+ %% Configure rabbit for decrypting configuration.
+ application:set_env(rabbit, config_entry_decoder, [
+ {cipher, aes_cbc256},
+ {hash, sha512},
+ {iterations, 1000},
+ {passphrase, "wrong passphrase"}
+ ], [{persistent, true}]),
+ %% Add the path to our test application.
+ code:add_path(?config(data_dir, Config) ++ "/lib/rabbit_shovel_test/ebin"),
+ %% Attempt to start our test application.
+ %%
+ %% We expect a failure during decryption because the passphrase is wrong.
+ try
+ rabbit:start_apps([rabbit_shovel_test], #{rabbit => temporary})
+ catch
+ throw:{config_decryption_error, _, _} -> ok;
+ _:Exception -> exit({unexpected_exception, Exception})
+ end.
+
+rabbitmqctl_encode(_Config) ->
+ % list ciphers and hashes
+ {ok, _} = rabbit_control_pbe:list_ciphers(),
+ {ok, _} = rabbit_control_pbe:list_hashes(),
+ % incorrect ciphers, hashes and iteration number
+ {error, _} = rabbit_control_pbe:encode(funny_cipher, undefined, undefined, undefined),
+ {error, _} = rabbit_control_pbe:encode(undefined, funny_hash, undefined, undefined),
+ {error, _} = rabbit_control_pbe:encode(undefined, undefined, -1, undefined),
+ {error, _} = rabbit_control_pbe:encode(undefined, undefined, 0, undefined),
+ % incorrect number of arguments
+ {error, _} = rabbit_control_pbe:encode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ []
+ ),
+ {error, _} = rabbit_control_pbe:encode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [undefined]
+ ),
+ {error, _} = rabbit_control_pbe:encode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [undefined, undefined, undefined]
+ ),
+
+ % encrypt/decrypt
+ % string
+ rabbitmqctl_encode_encrypt_decrypt("foobar"),
+ % binary
+ rabbitmqctl_encode_encrypt_decrypt("<<\"foobar\">>"),
+ % tuple
+ rabbitmqctl_encode_encrypt_decrypt("{password,<<\"secret\">>}"),
+
+ ok.
+
+rabbitmqctl_encode_encrypt_decrypt(Secret) ->
+ PassPhrase = "passphrase",
+ {ok, Output} = rabbit_control_pbe:encode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [Secret, PassPhrase]
+ ),
+ {encrypted, Encrypted} = rabbit_control_pbe:evaluate_input_as_term(lists:flatten(Output)),
+
+ {ok, Result} = rabbit_control_pbe:decode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [lists:flatten(io_lib:format("~p", [Encrypted])), PassPhrase]
+ ),
+ Secret = lists:flatten(Result),
+ % decrypt with {encrypted, ...} form as input
+ {ok, Result} = rabbit_control_pbe:decode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [lists:flatten(io_lib:format("~p", [{encrypted, Encrypted}])), PassPhrase]
+ ),
+
+ % wrong passphrase
+ {error, _} = rabbit_control_pbe:decode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [lists:flatten(io_lib:format("~p", [Encrypted])), PassPhrase ++ " "]
+ ),
+ {error, _} = rabbit_control_pbe:decode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [lists:flatten(io_lib:format("~p", [{encrypted, Encrypted}])), PassPhrase ++ " "]
+ ).
diff --git a/deps/rabbit/test/unit_config_value_encryption_SUITE_data/lib/rabbit_shovel_test/ebin/rabbit_shovel_test.app b/deps/rabbit/test/unit_config_value_encryption_SUITE_data/lib/rabbit_shovel_test/ebin/rabbit_shovel_test.app
new file mode 100644
index 0000000000..a8481c9aa4
--- /dev/null
+++ b/deps/rabbit/test/unit_config_value_encryption_SUITE_data/lib/rabbit_shovel_test/ebin/rabbit_shovel_test.app
@@ -0,0 +1,46 @@
+{application, rabbit_shovel_test,
+ [{description, "Test .app file for tests for encrypting configuration"},
+ {vsn, ""},
+ {modules, []},
+ {env, [ {shovels, [ {my_first_shovel,
+ [ {sources,
+ [ {brokers, [ {encrypted, <<"CfJXuka/uJYsqAtiJnwKpSY4moMPcOBh4sO8XDcdmhXbVYGKCDLKEilWPMfvOAQ2lN1BQneGn6bvDZi2+gDu6iHVKfafQAZSv8zcsVB3uYdBXFzqTCWO8TAsgG6LUMPT">>}
+ , {encrypted, <<"dBO6n+G1OiBwZeLXhvmNYeTE57nhBOmicUBF34zo4nQjerzQaNoEk8GA2Ts5PzMhYeO6U6Y9eEmheqIr9Gzh2duLZic65ZMQtIKNpWcZJllEhGpk7aV1COr23Yur9fWG">>}
+ ]}
+ , {declarations, [ {'exchange.declare',
+ [ {exchange, <<"my_fanout">>}
+ , {type, <<"fanout">>}
+ , durable
+ ]}
+ , {'queue.declare',
+ [{arguments,
+ [{<<"x-message-ttl">>, long, 60000}]}]}
+ , {'queue.bind',
+ [ {exchange, <<"my_direct">>}
+ , {queue, <<>>}
+ ]}
+ ]}
+ ]}
+ , {destinations,
+ [ {broker, "amqp://"}
+ , {declarations, [ {'exchange.declare',
+ [ {exchange, <<"my_direct">>}
+ , {type, <<"direct">>}
+ , durable
+ ]}
+ ]}
+ ]}
+ , {queue, <<>>}
+ , {prefetch_count, 10}
+ , {ack_mode, on_confirm}
+ , {publish_properties, [ {delivery_mode, 2} ]}
+ , {add_forward_headers, true}
+ , {publish_fields, [ {exchange, <<"my_direct">>}
+ , {routing_key, <<"from_shovel">>}
+ ]}
+ , {reconnect_delay, 5}
+ ]}
+ ]}
+ ]},
+
+ {applications, [kernel, stdlib]}]}.
diff --git a/deps/rabbit/test/unit_config_value_encryption_SUITE_data/rabbit_shovel_test.passphrase b/deps/rabbit/test/unit_config_value_encryption_SUITE_data/rabbit_shovel_test.passphrase
new file mode 100644
index 0000000000..ce01362503
--- /dev/null
+++ b/deps/rabbit/test/unit_config_value_encryption_SUITE_data/rabbit_shovel_test.passphrase
@@ -0,0 +1 @@
+hello
diff --git a/deps/rabbit/test/unit_connection_tracking_SUITE.erl b/deps/rabbit/test/unit_connection_tracking_SUITE.erl
new file mode 100644
index 0000000000..4ea1744fa7
--- /dev/null
+++ b/deps/rabbit/test/unit_connection_tracking_SUITE.erl
@@ -0,0 +1,119 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_connection_tracking_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ exchange_count,
+ queue_count,
+ connection_count,
+ connection_lookup
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% ---------------------------------------------------------------------------
+%% Count functions for management only API purposes
+%% ---------------------------------------------------------------------------
+
+exchange_count(Config) ->
+ %% Default exchanges == 7
+ ?assertEqual(7, rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, count, [])).
+
+queue_count(Config) ->
+ Conn = rabbit_ct_client_helpers:open_connection(Config, 0),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ amqp_channel:call(Ch, #'queue.declare'{ queue = <<"my-queue">> }),
+
+ ?assertEqual(1, rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, count, [])),
+
+ amqp_channel:call(Ch, #'queue.delete'{ queue = <<"my-queue">> }),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ ok.
+
+%% connection_count/1 has been failing on Travis. This appears to be a legitimate failure, as
+%% registration of connections in the tracker is asynchronous: `rabbit_connection_tracking_handler`
+%% receives a rabbit event with `connection_created`, which it then forwards as a cast to
+%% `rabbit_connection_tracker` for registration. We should wait a reasonable amount of time for
+%% the counter to increase before failing.
+connection_count(Config) ->
+ Conn = rabbit_ct_client_helpers:open_connection(Config, 0),
+
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_connection_tracking, count, []) == 1
+ end, 30000),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ ok.
+
+connection_lookup(Config) ->
+ Conn = rabbit_ct_client_helpers:open_connection(Config, 0),
+
+ %% Let's wait until the connection is registered, otherwise this test could fail in a slow
+ %% machine as connection tracking is asynchronous
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_connection_tracking, count, []) == 1
+ end, 30000),
+
+ [Connection] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_connection_tracking, list, []),
+ ?assertMatch(Connection, rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_connection_tracking,
+ lookup,
+ [Connection#tracked_connection.name])),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ ok.
diff --git a/deps/rabbit/test/unit_credit_flow_SUITE.erl b/deps/rabbit/test/unit_credit_flow_SUITE.erl
new file mode 100644
index 0000000000..ffad444dde
--- /dev/null
+++ b/deps/rabbit/test/unit_credit_flow_SUITE.erl
@@ -0,0 +1,90 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_credit_flow_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ credit_flow_settings
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% ---------------------------------------------------------------------------
+%% Test Cases
+%% ---------------------------------------------------------------------------
+
+credit_flow_settings(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, credit_flow_settings1, [Config]).
+
+credit_flow_settings1(_Config) ->
+ passed = test_proc(400, 200, {400, 200}),
+ passed = test_proc(600, 300),
+ passed.
+
+test_proc(InitialCredit, MoreCreditAfter) ->
+ test_proc(InitialCredit, MoreCreditAfter, {InitialCredit, MoreCreditAfter}).
+test_proc(InitialCredit, MoreCreditAfter, Settings) ->
+ Pid = spawn(?MODULE, dummy, [Settings]),
+ Pid ! {credit, self()},
+ {InitialCredit, MoreCreditAfter} =
+ receive
+ {credit, Val} -> Val
+ end,
+ passed.
+
+dummy(Settings) ->
+ credit_flow:send(self()),
+ receive
+ {credit, From} ->
+ From ! {credit, Settings};
+ _ ->
+ dummy(Settings)
+ end.
diff --git a/deps/rabbit/test/unit_disk_monitor_SUITE.erl b/deps/rabbit/test/unit_disk_monitor_SUITE.erl
new file mode 100644
index 0000000000..bc21114c12
--- /dev/null
+++ b/deps/rabbit/test/unit_disk_monitor_SUITE.erl
@@ -0,0 +1,90 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_disk_monitor_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ set_disk_free_limit_command
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+set_disk_free_limit_command(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, set_disk_free_limit_command1, [Config]).
+
+set_disk_free_limit_command1(_Config) ->
+ %% Use an integer
+ rabbit_disk_monitor:set_disk_free_limit({mem_relative, 1}),
+ disk_free_limit_to_total_memory_ratio_is(1),
+
+ %% Use a float
+ rabbit_disk_monitor:set_disk_free_limit({mem_relative, 1.5}),
+ disk_free_limit_to_total_memory_ratio_is(1.5),
+
+ %% use an absolute value
+ rabbit_disk_monitor:set_disk_free_limit("70MiB"),
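+ %% 70 MiB = 70 * 1024 * 1024 = 73400320 bytes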
+ ?assertEqual(73400320, rabbit_disk_monitor:get_disk_free_limit()),
+
+ rabbit_disk_monitor:set_disk_free_limit("50MB"),
+ ?assertEqual(50 * 1000 * 1000, rabbit_disk_monitor:get_disk_free_limit()),
+ passed.
+
+disk_free_limit_to_total_memory_ratio_is(MemRatio) ->
+ ExpectedLimit = MemRatio * vm_memory_monitor:get_total_memory(),
+ % Total memory readings are unstable, so only check that the ratio stays within expected bounds
+ true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() < 1.2,
+ true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() > 0.98.
diff --git a/deps/rabbit/test/unit_disk_monitor_mocks_SUITE.erl b/deps/rabbit/test/unit_disk_monitor_mocks_SUITE.erl
new file mode 100644
index 0000000000..af78d0d134
--- /dev/null
+++ b/deps/rabbit/test/unit_disk_monitor_mocks_SUITE.erl
@@ -0,0 +1,112 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_disk_monitor_mocks_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ disk_monitor,
+ disk_monitor_enable
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+disk_monitor(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, disk_monitor1, [Config]).
+
+disk_monitor1(_Config) ->
+ %% Issue: rabbitmq-server #91
+ %% The os module could be mocked using 'unstick', but that may have undesired
+ %% side effects in subsequent tests. Thus, we mock at the rabbit_misc level.
+ ok = meck:new(rabbit_misc, [passthrough]),
+ ok = meck:expect(rabbit_misc, os_cmd, fun(_) -> "\n" end),
+ ok = rabbit_sup:stop_child(rabbit_disk_monitor_sup),
+ ok = rabbit_sup:start_delayed_restartable_child(rabbit_disk_monitor, [1000]),
+ meck:unload(rabbit_misc),
+ passed.
+
+disk_monitor_enable(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, disk_monitor_enable1, [Config]).
+
+disk_monitor_enable1(_Config) ->
+ ok = meck:new(rabbit_misc, [passthrough]),
+ ok = meck:expect(rabbit_misc, os_cmd, fun(_) -> "\n" end),
+ application:set_env(rabbit, disk_monitor_failure_retries, 20000),
+ application:set_env(rabbit, disk_monitor_failure_retry_interval, 100),
+ ok = rabbit_sup:stop_child(rabbit_disk_monitor_sup),
+ ok = rabbit_sup:start_delayed_restartable_child(rabbit_disk_monitor, [1000]),
+ undefined = rabbit_disk_monitor:get_disk_free(),
+ Cmd = case os:type() of
+ {win32, _} -> " Le volume dans le lecteur C n’a pas de nom.\n"
+ " Le numéro de série du volume est 707D-5BDC\n"
+ "\n"
+                       " Répertoire de C:\\Users\n"
+ "\n"
+ "10/12/2015 11:01 <DIR> .\n"
+ "10/12/2015 11:01 <DIR> ..\n"
+ " 0 fichier(s) 0 octets\n"
+ " 2 Rép(s) 758537121792 octets libres\n";
+ _ -> "Filesystem 1024-blocks Used Available Capacity iused ifree %iused Mounted on\n"
+ "/dev/disk1 975798272 234783364 740758908 25% 58759839 185189727 24% /\n"
+ end,
+ ok = meck:expect(rabbit_misc, os_cmd, fun(_) -> Cmd end),
+ timer:sleep(1000),
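+    %% 740758908 is the "Available" 1024-block count in the mocked df output above,
+    %% so the reported free space should be 740758908 * 1024 bytes.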
+ Bytes = 740758908 * 1024,
+ Bytes = rabbit_disk_monitor:get_disk_free(),
+ meck:unload(rabbit_misc),
+ application:set_env(rabbit, disk_monitor_failure_retries, 10),
+ application:set_env(rabbit, disk_monitor_failure_retry_interval, 120000),
+ passed.
diff --git a/deps/rabbit/test/unit_file_handle_cache_SUITE.erl b/deps/rabbit/test/unit_file_handle_cache_SUITE.erl
new file mode 100644
index 0000000000..f2252aa2b5
--- /dev/null
+++ b/deps/rabbit/test/unit_file_handle_cache_SUITE.erl
@@ -0,0 +1,278 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_file_handle_cache_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ file_handle_cache, %% Change FHC limit.
+ file_handle_cache_reserve,
+ file_handle_cache_reserve_release,
+ file_handle_cache_reserve_above_limit,
+ file_handle_cache_reserve_monitor,
+ file_handle_cache_reserve_open_file_above_limit
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun setup_file_handle_cache/1
+ ]).
+
+setup_file_handle_cache(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, setup_file_handle_cache1, []),
+ Config.
+
+setup_file_handle_cache1() ->
+ %% FIXME: Why are we doing this?
+ application:set_env(rabbit, file_handles_high_watermark, 10),
+ ok = file_handle_cache:set_limit(10),
+ ok.
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% ---------------------------------------------------------------------------
+%% file_handle_cache.
+%% ---------------------------------------------------------------------------
+
+file_handle_cache(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache1, [Config]).
+
+file_handle_cache1(_Config) ->
+ %% test copying when there is just one spare handle
+ Limit = file_handle_cache:get_limit(),
+ ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores
+ TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"),
+ ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")),
+ [Src1, Dst1, Src2, Dst2] = Files =
+ [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]],
+ Content = <<"foo">>,
+ CopyFun = fun (Src, Dst) ->
+ {ok, Hdl} = prim_file:open(Src, [binary, write]),
+ ok = prim_file:write(Hdl, Content),
+ ok = prim_file:sync(Hdl),
+ prim_file:close(Hdl),
+
+ {ok, SrcHdl} = file_handle_cache:open(Src, [read], []),
+ {ok, DstHdl} = file_handle_cache:open(Dst, [write], []),
+ Size = size(Content),
+ {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size),
+ ok = file_handle_cache:delete(SrcHdl),
+ ok = file_handle_cache:delete(DstHdl)
+ end,
+ Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open(
+ filename:join(TmpDir, "file5"),
+ [write], []),
+ receive {next, Pid1} -> Pid1 ! {next, self()} end,
+ file_handle_cache:delete(Hdl),
+ %% This will block and never return, so we
+ %% exercise the fhc tidying up the pending
+ %% queue on the death of a process.
+ ok = CopyFun(Src1, Dst1)
+ end),
+ ok = CopyFun(Src1, Dst1),
+ ok = file_handle_cache:set_limit(2),
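+    %% With the limit lowered to 2, the copies performed by Pid and Pid1 below will
+    %% block waiting for handles; killing them exercises the pending-queue cleanup.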
+ Pid ! {next, self()},
+ receive {next, Pid} -> ok end,
+ timer:sleep(100),
+ Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end),
+ timer:sleep(100),
+ erlang:monitor(process, Pid),
+ erlang:monitor(process, Pid1),
+ exit(Pid, kill),
+ exit(Pid1, kill),
+ receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end,
+ receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end,
+ [file:delete(File) || File <- Files],
+ ok = file_handle_cache:set_limit(Limit),
+ passed.
+
+file_handle_cache_reserve(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache_reserve1, [Config]).
+
+file_handle_cache_reserve1(_Config) ->
+ Limit = file_handle_cache:get_limit(),
+ ok = file_handle_cache:set_limit(5),
+ %% Reserves are always accepted, even if above the limit
+ %% These are for special processes such as quorum queues
+ ok = file_handle_cache:set_reservation(7),
+
+ Self = self(),
+ spawn(fun () -> ok = file_handle_cache:obtain(),
+ Self ! obtained
+ end),
+
+ Props = file_handle_cache:info([files_reserved, sockets_used]),
+ ?assertEqual(7, proplists:get_value(files_reserved, Props)),
+ ?assertEqual(0, proplists:get_value(sockets_used, Props)),
+
+ %% The obtain should still be blocked, as there are no file handles
+ %% available
+ receive
+ obtained ->
+ throw(error_file_obtained)
+ after 1000 ->
+            %% Release 5 of the reserved handles; that should leave
+            %% enough free for the `obtain` to go through
+ file_handle_cache:set_reservation(2),
+ Props0 = file_handle_cache:info([files_reserved, sockets_used]),
+ ?assertEqual(2, proplists:get_value(files_reserved, Props0)),
+ ?assertEqual(1, proplists:get_value(sockets_used, Props0)),
+ receive
+ obtained ->
+ ok = file_handle_cache:set_limit(Limit),
+ passed
+ after 5000 ->
+ throw(error_file_not_released)
+ end
+ end.
+
+file_handle_cache_reserve_release(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache_reserve_release1, [Config]).
+
+file_handle_cache_reserve_release1(_Config) ->
+ ok = file_handle_cache:set_reservation(7),
+ ?assertEqual([{files_reserved, 7}], file_handle_cache:info([files_reserved])),
+ ok = file_handle_cache:set_reservation(3),
+ ?assertEqual([{files_reserved, 3}], file_handle_cache:info([files_reserved])),
+ ok = file_handle_cache:release_reservation(),
+ ?assertEqual([{files_reserved, 0}], file_handle_cache:info([files_reserved])),
+ passed.
+
+file_handle_cache_reserve_above_limit(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache_reserve_above_limit1, [Config]).
+
+file_handle_cache_reserve_above_limit1(_Config) ->
+ Limit = file_handle_cache:get_limit(),
+ ok = file_handle_cache:set_limit(5),
+ %% Reserves are always accepted, even if above the limit
+ %% These are for special processes such as quorum queues
+ ok = file_handle_cache:obtain(5),
+ ?assertEqual([{file_descriptor_limit, []}], rabbit_alarm:get_alarms()),
+
+ ok = file_handle_cache:set_reservation(7),
+
+ Props = file_handle_cache:info([files_reserved, sockets_used]),
+ ?assertEqual(7, proplists:get_value(files_reserved, Props)),
+ ?assertEqual(5, proplists:get_value(sockets_used, Props)),
+
+ ok = file_handle_cache:set_limit(Limit),
+ passed.
+
+file_handle_cache_reserve_open_file_above_limit(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache_reserve_open_file_above_limit1, [Config]).
+
+file_handle_cache_reserve_open_file_above_limit1(_Config) ->
+ Limit = file_handle_cache:get_limit(),
+ ok = file_handle_cache:set_limit(5),
+ %% Reserves are always accepted, even if above the limit
+ %% These are for special processes such as quorum queues
+ ok = file_handle_cache:set_reservation(7),
+
+ Self = self(),
+ TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"),
+ spawn(fun () -> {ok, _} = file_handle_cache:open(
+ filename:join(TmpDir, "file_above_limit"),
+ [write], []),
+ Self ! opened
+ end),
+
+ Props = file_handle_cache:info([files_reserved]),
+ ?assertEqual(7, proplists:get_value(files_reserved, Props)),
+
+ %% The open should still be blocked, as there are no file handles
+ %% available
+ receive
+ opened ->
+ throw(error_file_opened)
+ after 1000 ->
+            %% Release 5 of the reserved handles; that should leave
+            %% enough free for the `open` to go through
+ file_handle_cache:set_reservation(2),
+ Props0 = file_handle_cache:info([files_reserved, total_used]),
+ ?assertEqual(2, proplists:get_value(files_reserved, Props0)),
+ receive
+ opened ->
+ ok = file_handle_cache:set_limit(Limit),
+ passed
+ after 5000 ->
+ throw(error_file_not_released)
+ end
+ end.
+
+file_handle_cache_reserve_monitor(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache_reserve_monitor1, [Config]).
+
+file_handle_cache_reserve_monitor1(_Config) ->
+    %% Check that if the process that made the reservation dies, the reserved
+    %% file handles are released by the cache
+ Self = self(),
+ Pid = spawn(fun () ->
+ ok = file_handle_cache:set_reservation(2),
+ Self ! done,
+ receive
+ stop -> ok
+ end
+ end),
+ receive
+ done -> ok
+ end,
+ ?assertEqual([{files_reserved, 2}], file_handle_cache:info([files_reserved])),
+ Pid ! stop,
+ timer:sleep(500),
+ ?assertEqual([{files_reserved, 0}], file_handle_cache:info([files_reserved])),
+ passed.
diff --git a/deps/rabbit/test/unit_gen_server2_SUITE.erl b/deps/rabbit/test/unit_gen_server2_SUITE.erl
new file mode 100644
index 0000000000..babd340f19
--- /dev/null
+++ b/deps/rabbit/test/unit_gen_server2_SUITE.erl
@@ -0,0 +1,152 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_gen_server2_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ gen_server2_with_state,
+ mcall
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+gen_server2_with_state(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, gen_server2_with_state1, [Config]).
+
+gen_server2_with_state1(_Config) ->
+ fhc_state = gen_server2:with_state(file_handle_cache,
+ fun (S) -> element(1, S) end),
+ passed.
+
+
+mcall(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, mcall1, [Config]).
+
+mcall1(_Config) ->
+ P1 = spawn(fun gs2_test_listener/0),
+ register(foo, P1),
+ global:register_name(gfoo, P1),
+
+ P2 = spawn(fun() -> exit(bang) end),
+ %% ensure P2 is dead (ignore the race setting up the monitor)
+ await_exit(P2),
+
+ P3 = spawn(fun gs2_test_crasher/0),
+
+ %% since P2 crashes almost immediately and P3 after receiving its first
+ %% message, we have to spawn a few more processes to handle the additional
+ %% cases we're interested in here
+ register(baz, spawn(fun gs2_test_crasher/0)),
+ register(bog, spawn(fun gs2_test_crasher/0)),
+ global:register_name(gbaz, spawn(fun gs2_test_crasher/0)),
+
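+    %% a node name that does not exist, to exercise the 'nodedown' error path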
+ NoNode = rabbit_nodes:make("nonode"),
+
+ Targets =
+ %% pids
+ [P1, P2, P3]
+ ++
+ %% registered names
+ [foo, bar, baz]
+ ++
+ %% {Name, Node} pairs
+ [{foo, node()}, {bar, node()}, {bog, node()}, {foo, NoNode}]
+ ++
+ %% {global, Name}
+ [{global, gfoo}, {global, gbar}, {global, gbaz}],
+
+ GoodResults = [{D, goodbye} || D <- [P1, foo,
+ {foo, node()},
+ {global, gfoo}]],
+
+ BadResults = [{P2, noproc}, % died before use
+ {P3, boom}, % died on first use
+ {bar, noproc}, % never registered
+ {baz, boom}, % died on first use
+ {{bar, node()}, noproc}, % never registered
+ {{bog, node()}, boom}, % died on first use
+ {{foo, NoNode}, nodedown}, % invalid node
+ {{global, gbar}, noproc}, % never registered globally
+ {{global, gbaz}, boom}], % died on first use
+
+ {Replies, Errors} = gen_server2:mcall([{T, hello} || T <- Targets]),
+ true = lists:sort(Replies) == lists:sort(GoodResults),
+ true = lists:sort(Errors) == lists:sort(BadResults),
+
+ %% cleanup (ignore the race setting up the monitor)
+ P1 ! stop,
+ await_exit(P1),
+ passed.
+
+await_exit(Pid) ->
+ MRef = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', MRef, _, _, _} -> ok
+ end.
+
+gs2_test_crasher() ->
+ receive
+ {'$gen_call', _From, hello} -> exit(boom)
+ end.
+
+gs2_test_listener() ->
+ receive
+ {'$gen_call', From, hello} ->
+ gen_server2:reply(From, goodbye),
+ gs2_test_listener();
+ stop ->
+ ok
+ end.
diff --git a/deps/rabbit/test/unit_gm_SUITE.erl b/deps/rabbit/test/unit_gm_SUITE.erl
new file mode 100644
index 0000000000..74400ddaa5
--- /dev/null
+++ b/deps/rabbit/test/unit_gm_SUITE.erl
@@ -0,0 +1,242 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_gm_SUITE).
+
+-behaviour(gm).
+
+-include_lib("common_test/include/ct.hrl").
+
+-include("gm_specs.hrl").
+
+-compile(export_all).
+
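+%% Wait up to 5 seconds for a message matching Body; assert Bool about it, or throw Error on timeout.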
+-define(RECEIVE_OR_THROW(Body, Bool, Error),
+ receive Body ->
+ true = Bool,
+ passed
+ after 5000 ->
+ throw(Error)
+ end).
+
+all() ->
+ [
+ join_leave,
+ broadcast,
+ confirmed_broadcast,
+ member_death,
+ receive_in_order,
+ unexpected_msg,
+ down_in_members_change
+ ].
+
+init_per_suite(Config) ->
+ ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)),
+ ok = application:start(mnesia),
+ {ok, FHC} = file_handle_cache:start_link(),
+ unlink(FHC),
+ {ok, WPS} = worker_pool_sup:start_link(),
+ unlink(WPS),
+ rabbit_ct_helpers:set_config(Config, [
+ {file_handle_cache_pid, FHC},
+ {worker_pool_sup_pid, WPS}
+ ]).
+
+end_per_suite(Config) ->
+ exit(?config(worker_pool_sup_pid, Config), shutdown),
+ exit(?config(file_handle_cache_pid, Config), shutdown),
+ ok = application:stop(mnesia),
+ Config.
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+join_leave(_Config) ->
+ passed = with_two_members(fun (_Pid, _Pid2) -> passed end).
+
+broadcast(_Config) ->
+ passed = do_broadcast(fun gm:broadcast/2).
+
+confirmed_broadcast(_Config) ->
+ passed = do_broadcast(fun gm:confirmed_broadcast/2).
+
+member_death(_Config) ->
+ passed = with_two_members(
+ fun (Pid, Pid2) ->
+ {ok, Pid3} = gm:start_link(
+ ?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid3, [Pid, Pid2, Pid3],
+ timeout_joining_gm_group_3),
+ passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1),
+ passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2),
+
+ unlink(Pid3),
+ exit(Pid3, kill),
+
+ %% Have to do some broadcasts to ensure that all members
+ %% find out about the death.
+ BFun = broadcast_fun(fun gm:confirmed_broadcast/2),
+ passed = BFun(Pid, Pid2),
+ passed = BFun(Pid, Pid2),
+
+ passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1),
+ passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2),
+
+ passed
+ end).
+
+receive_in_order(_Config) ->
+ passed = with_two_members(
+ fun (Pid, Pid2) ->
+ Numbers = lists:seq(1,1000),
+ [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end
+ || N <- Numbers],
+ passed = receive_numbers(
+ Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers),
+ passed = receive_numbers(
+ Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers),
+ passed = receive_numbers(
+ Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers),
+ passed = receive_numbers(
+ Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers),
+ passed
+ end).
+
+unexpected_msg(_Config) ->
+ passed = with_two_members(
+ fun(Pid, _) ->
+ Pid ! {make_ref(), old_gen_server_answer},
+ true = erlang:is_process_alive(Pid),
+ passed
+ end).
+
+down_in_members_change(_Config) ->
+ %% Setup
+ ok = gm:create_tables(),
+ {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
+ {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
+ passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
+
+ %% Test. Simulate that the gm group is deleted (forget_group) while
+ %% processing the 'DOWN' message from the neighbour
+ process_flag(trap_exit, true),
+ ok = meck:new(mnesia, [passthrough]),
+ ok = meck:expect(mnesia, read, fun({gm_group, ?MODULE}) ->
+ [];
+ (Key) ->
+ meck:passthrough([Key])
+ end),
+ gm:leave(Pid2),
+ Passed = receive
+ {'EXIT', Pid, shutdown} ->
+ passed;
+ {'EXIT', Pid, _} ->
+ crashed
+ after 15000 ->
+ timeout
+ end,
+ %% Cleanup
+ meck:unload(mnesia),
+ process_flag(trap_exit, false),
+ passed = Passed.
+
+
+do_broadcast(Fun) ->
+ with_two_members(broadcast_fun(Fun)).
+
+broadcast_fun(Fun) ->
+ fun (Pid, Pid2) ->
+ ok = Fun(Pid, magic_message),
+ passed = receive_or_throw({msg, Pid, Pid, magic_message},
+ timeout_waiting_for_msg),
+ passed = receive_or_throw({msg, Pid2, Pid, magic_message},
+ timeout_waiting_for_msg)
+ end.
+
+with_two_members(Fun) ->
+ ok = gm:create_tables(),
+
+ {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
+
+ {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
+ passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
+
+ passed = Fun(Pid, Pid2),
+
+ ok = gm:leave(Pid),
+ passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1),
+ passed =
+ receive_termination(Pid, normal, timeout_waiting_for_termination_1),
+
+ ok = gm:leave(Pid2),
+ passed =
+ receive_termination(Pid2, normal, timeout_waiting_for_termination_2),
+
+ receive X -> throw({unexpected_message, X})
+ after 0 -> passed
+ end.
+
+receive_or_throw(Pattern, Error) ->
+ ?RECEIVE_OR_THROW(Pattern, true, Error).
+
+receive_birth(From, Born, Error) ->
+ ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
+ ([Born] == Birth) andalso ([] == Death),
+ Error).
+
+receive_death(From, Died, Error) ->
+ ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
+ ([] == Birth) andalso ([Died] == Death),
+ Error).
+
+receive_joined(From, Members, Error) ->
+ ?RECEIVE_OR_THROW({joined, From, Members1},
+ lists:usort(Members) == lists:usort(Members1),
+ Error).
+
+receive_termination(From, Reason, Error) ->
+ ?RECEIVE_OR_THROW({termination, From, Reason1},
+ Reason == Reason1,
+ Error).
+
+receive_numbers(_Pid, _Sender, _Error, []) ->
+ passed;
+receive_numbers(Pid, Sender, Error, [N | Numbers]) ->
+ ?RECEIVE_OR_THROW({msg, Pid, Sender, M},
+ M == N,
+ Error),
+ receive_numbers(Pid, Sender, Error, Numbers).
+
+%% -------------------------------------------------------------------
+%% gm behavior callbacks.
+%% -------------------------------------------------------------------
+
+joined(Pid, Members) ->
+ Pid ! {joined, self(), Members},
+ ok.
+
+members_changed(Pid, Births, Deaths) ->
+ Pid ! {members_changed, self(), Births, Deaths},
+ ok.
+
+handle_msg(Pid, From, Msg) ->
+ Pid ! {msg, self(), From, Msg},
+ ok.
+
+handle_terminate(Pid, Reason) ->
+ Pid ! {termination, self(), Reason},
+ ok.
diff --git a/deps/rabbit/test/unit_log_config_SUITE.erl b/deps/rabbit/test/unit_log_config_SUITE.erl
new file mode 100644
index 0000000000..6be403fd3e
--- /dev/null
+++ b/deps/rabbit/test/unit_log_config_SUITE.erl
@@ -0,0 +1,837 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_log_config_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ default,
+ env_var_tty,
+ config_file_handler,
+ config_file_handler_level,
+ config_file_handler_rotation,
+ config_console_handler,
+ config_exchange_handler,
+ config_syslog_handler,
+ config_syslog_handler_options,
+ config_multiple_handlers,
+
+ env_var_overrides_config,
+ env_var_disable_log,
+
+ config_sinks_level,
+ config_sink_file,
+ config_sink_file_override_config_handler_file,
+
+ config_handlers_merged_with_lager_handlers,
+ sink_handlers_merged_with_lager_extra_sinks_handlers,
+ sink_file_rewrites_file_backends
+ ].
+
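+
+%% Unset any rabbit/lager log settings before and after each test so cases do not leak state.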
+init_per_testcase(_, Config) ->
+ application:load(rabbit),
+ application:load(lager),
+ application:unset_env(rabbit, log),
+ application:unset_env(rabbit, lager_log_root),
+ application:unset_env(rabbit, lager_default_file),
+ application:unset_env(rabbit, lager_upgrade_file),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, rabbit_handlers),
+ application:unset_env(lager, extra_sinks),
+ unset_logs_var_origin(),
+ Config.
+
+end_per_testcase(_, Config) ->
+ application:unset_env(rabbit, log),
+ application:unset_env(rabbit, lager_log_root),
+ application:unset_env(rabbit, lager_default_file),
+ application:unset_env(rabbit, lager_upgrade_file),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, rabbit_handlers),
+ application:unset_env(lager, extra_sinks),
+ unset_logs_var_origin(),
+ application:unload(rabbit),
+ application:unload(lager),
+ Config.
+
+sink_file_rewrites_file_backends(_) ->
+ application:set_env(rabbit, log, [
+ %% Disable rabbit file handler
+ {file, [{file, false}]},
+ {categories, [{federation, [{file, "federation.log"}, {level, warning}]}]}
+ ]),
+
+ LagerHandlers = [
+ {lager_file_backend, [{file, "lager_file.log"}, {level, error}]},
+ {lager_file_backend, [{file, "lager_file_1.log"}, {level, error}]},
+ {lager_console_backend, [{level, info}]},
+ {lager_exchange_backend, [{level, info}]}
+ ],
+ application:set_env(lager, handlers, LagerHandlers),
+ rabbit_lager:configure_lager(),
+
+ ExpectedSinks = sort_sinks(sink_rewrite_sinks()),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+sink_rewrite_sinks() ->
+ [{error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[
+ {lager_file_backend,
+ [{date, ""},
+ {file, "federation.log"},
+ {formatter_config, formatter_config(file)},
+ {level, warning},
+ {size, 0}]},
+ {lager_console_backend, [{level, warning}]},
+ {lager_exchange_backend, [{level, warning}]}
+ ]},
+ {rabbit_handlers,[
+ {lager_file_backend,
+ [{date, ""},
+ {file, "federation.log"},
+ {formatter_config, formatter_config(file)},
+ {level, warning},
+ {size, 0}]},
+ {lager_console_backend, [{level, warning}]},
+ {lager_exchange_backend, [{level, warning}]}
+ ]}]},
+ {rabbit_log_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}
+ ].
+
+sink_handlers_merged_with_lager_extra_sinks_handlers(_) ->
+ DefaultLevel = debug,
+ application:set_env(rabbit, log, [
+ {file, [{file, "rabbit_file.log"}, {level, DefaultLevel}]},
+ {console, [{enabled, true}, {level, error}]},
+ {exchange, [{enabled, true}, {level, error}]},
+ {categories, [
+ {connection, [{level, debug}]},
+ {channel, [{level, warning}, {file, "channel_log.log"}]}
+ ]}
+ ]),
+
+ LagerSinks = [
+ {rabbit_log_connection_lager_event,
+ [{handlers,
+ [{lager_file_backend,
+ [{file, "connection_lager.log"},
+ {level, info}]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,
+ [{lager_console_backend, [{level, debug}]},
+ {lager_exchange_backend, [{level, debug}]},
+ {lager_file_backend, [{level, error},
+ {file, "channel_lager.log"}]}]}]}],
+
+ application:set_env(lager, extra_sinks, LagerSinks),
+ rabbit_lager:configure_lager(),
+
+ ExpectedSinks = sort_sinks([
+ {error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[
+ {lager_console_backend, [{level, error},
+ {formatter_config, formatter_config(console)}]},
+ {lager_exchange_backend, [{level, error},
+ {formatter_config, formatter_config(exchange)}]},
+ {lager_file_backend,
+ [{date, ""},
+ {file, "channel_log.log"},
+ {formatter_config, formatter_config(file)},
+ {level, warning},
+ {size, 0}]},
+ {lager_console_backend, [{level, debug}]},
+ {lager_exchange_backend, [{level, debug}]},
+ {lager_file_backend, [{level, error},
+ {file, "channel_lager.log"}]}
+ ]},
+ {rabbit_handlers,[
+ {lager_console_backend, [{level, error},
+ {formatter_config, formatter_config(console)}]},
+ {lager_exchange_backend, [{level, error},
+ {formatter_config, formatter_config(exchange)}]},
+ {lager_file_backend,
+ [{date, ""},
+ {file, "channel_log.log"},
+ {formatter_config, formatter_config(file)},
+ {level, warning},
+ {size, 0}]}]}
+ ]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,debug]},
+ {lager_file_backend, [{file, "connection_lager.log"}, {level, info}]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,debug]}]}]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}]),
+
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+config_handlers_merged_with_lager_handlers(_) ->
+ application:set_env(rabbit, log, [
+ {file, [{file, "rabbit_file.log"}, {level, debug}]},
+ {console, [{enabled, true}, {level, error}]},
+ {exchange, [{enabled, true}, {level, error}]},
+ {syslog, [{enabled, true}]}
+ ]),
+
+ LagerHandlers = [
+ {lager_file_backend, [{file, "lager_file.log"}, {level, info}]},
+ {lager_console_backend, [{level, info}]},
+ {lager_exchange_backend, [{level, info}]},
+ {lager_exchange_backend, [{level, info}]}
+ ],
+ application:set_env(lager, handlers, LagerHandlers),
+ rabbit_lager:configure_lager(),
+
+ FileHandlers = default_expected_handlers("rabbit_file.log", debug),
+ ConsoleHandlers = expected_console_handler(error),
+ RabbitHandlers = expected_rabbit_handler(error),
+ SyslogHandlers = expected_syslog_handler(),
+
+ ExpectedRabbitHandlers = sort_handlers(FileHandlers ++ ConsoleHandlers ++ RabbitHandlers ++ SyslogHandlers),
+ ExpectedHandlers = sort_handlers(ExpectedRabbitHandlers ++ LagerHandlers),
+
+ ?assertEqual(ExpectedRabbitHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))).
+
+config_sinks_level(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ application:set_env(rabbit, log, [
+ {categories, [
+ {connection, [{level, warning}]},
+ {channel, [{level, debug}]},
+ {mirroring, [{level, error}]}
+ ]}
+ ]),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedSinks = sort_sinks(level_sinks()),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+level_sinks() ->
+ [{error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,debug]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,debug]}]}]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,warning]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,warning]}]}]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,error]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,error]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,
+ [lager_event,info]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}
+ ].
+
+config_sink_file(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ DefaultLevel = error,
+ application:set_env(rabbit, log, [
+ {console, [{enabled, true}]},
+ {exchange, [{enabled, true}]},
+ {file, [{level, DefaultLevel}]},
+ {categories, [
+ {connection, [{file, "connection.log"}, {level, warning}]}
+ ]}
+ ]),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedSinks = sort_sinks(file_sinks(DefaultLevel)),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+config_sink_file_override_config_handler_file(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ NonDefaultLogFile = "rabbit_not_default.log",
+
+ DefaultLevel = error,
+ application:set_env(rabbit, log, [
+ {file, [{file, NonDefaultLogFile}, {level, DefaultLevel}]},
+ {console, [{enabled, true}]},
+ {exchange, [{enabled, true}]},
+ {categories, [
+ {connection, [{file, "connection.log"}, {level, warning}]}
+ ]}
+ ]),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedSinks = sort_sinks(file_sinks(DefaultLevel)),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+file_sinks() ->
+ file_sinks(info).
+
+file_sinks(DefaultLevel) ->
+ [{error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[
+ {lager_console_backend, [{level, warning},
+ {formatter_config, formatter_config(console)}]},
+ {lager_exchange_backend, [{level, warning},
+ {formatter_config, formatter_config(exchange)}]},
+ {lager_file_backend,
+ [{date, ""},
+ {file, "connection.log"},
+ {formatter_config, formatter_config(file)},
+ {level, error},
+ {size, 0}]}]},
+ {rabbit_handlers,[
+ {lager_console_backend, [{level, warning},
+ {formatter_config, formatter_config(console)}]},
+ {lager_exchange_backend, [{level, warning},
+ {formatter_config, formatter_config(exchange)}]},
+ {lager_file_backend,
+ [{date, ""},
+ {file, "connection.log"},
+ {formatter_config, formatter_config(backend)},
+ {level, error},
+ {size, 0}]}]}
+ ]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}
+ ].
+
+config_multiple_handlers(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ application:set_env(rabbit, log, [
+ %% Disable file output
+ {file, [{file, false}]},
+ %% Enable console output
+ {console, [{enabled, true}]},
+ %% Enable exchange output
+ {exchange, [{enabled, true}]},
+ %% Enable a syslog output
+ {syslog, [{enabled, true}, {level, error}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ ConsoleHandlers = expected_console_handler(),
+ RabbitHandlers = expected_rabbit_handler(),
+ SyslogHandlers = expected_syslog_handler(error),
+
+ ExpectedHandlers = sort_handlers(SyslogHandlers ++ ConsoleHandlers ++ RabbitHandlers),
+
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_console_handler(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+ application:set_env(rabbit, log, [{console, [{enabled, true}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ FileHandlers = default_expected_handlers(DefaultLogFile),
+ ConsoleHandlers = expected_console_handler(),
+
+ ExpectedHandlers = sort_handlers(FileHandlers ++ ConsoleHandlers),
+
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_exchange_handler(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+ application:set_env(rabbit, log, [{exchange, [{enabled, true}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ FileHandlers = default_expected_handlers(DefaultLogFile),
+ ExchangeHandlers = expected_rabbit_handler(),
+
+ ExpectedHandlers = sort_handlers(FileHandlers ++ ExchangeHandlers),
+
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+expected_console_handler() ->
+ expected_console_handler(debug).
+
+expected_console_handler(Level) ->
+ [{lager_console_backend, [{level, Level},
+ {formatter_config, formatter_config(console)}]}].
+
+expected_rabbit_handler() ->
+ expected_rabbit_handler(debug).
+
+expected_rabbit_handler(Level) ->
+ [{lager_exchange_backend, [{level, Level},
+ {formatter_config, formatter_config(exchange)}]}].
+
+config_syslog_handler(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+ application:set_env(rabbit, log, [{syslog, [{enabled, true}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ FileHandlers = default_expected_handlers(DefaultLogFile),
+ SyslogHandlers = expected_syslog_handler(),
+
+ ExpectedHandlers = sort_handlers(FileHandlers ++ SyslogHandlers),
+
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_syslog_handler_options(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+ application:set_env(rabbit, log, [{syslog, [{enabled, true},
+ {level, warning}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ FileHandlers = default_expected_handlers(DefaultLogFile),
+ SyslogHandlers = expected_syslog_handler(warning),
+
+ ExpectedHandlers = sort_handlers(FileHandlers ++ SyslogHandlers),
+
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+expected_syslog_handler() ->
+ expected_syslog_handler(debug).
+
+expected_syslog_handler(Level) ->
+ [{syslog_lager_backend, [Level,
+ {},
+ {lager_default_formatter, syslog_formatter_config()}]}].
+
+env_var_overrides_config(_) ->
+ EnvLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, EnvLogFile),
+
+ ConfigLogFile = "rabbit_not_default.log",
+ application:set_env(rabbit, log, [{file, [{file, ConfigLogFile}]}]),
+
+ set_logs_var_origin(environment),
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = default_expected_handlers(EnvLogFile),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+env_var_disable_log(_) ->
+ application:set_env(rabbit, lager_default_file, false),
+
+ ConfigLogFile = "rabbit_not_default.log",
+ application:set_env(rabbit, log, [{file, [{file, ConfigLogFile}]}]),
+
+ set_logs_var_origin(environment),
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = [],
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_file_handler(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ NonDefaultLogFile = "rabbit_not_default.log",
+ application:set_env(rabbit, log, [{file, [{file, NonDefaultLogFile}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = default_expected_handlers(NonDefaultLogFile),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_file_handler_level(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ application:set_env(rabbit, log, [{file, [{level, warning}]}]),
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = default_expected_handlers(DefaultLogFile, warning),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_file_handler_rotation(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ application:set_env(rabbit, log, [{file, [{date, "$D0"}, {size, 5000}, {count, 10}]}]),
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = sort_handlers(default_expected_handlers(DefaultLogFile, debug, 5000, "$D0", [{count, 10}])),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+default(_) ->
+ LogRoot = "/tmp/log_base",
+ application:set_env(rabbit, lager_log_root, LogRoot),
+ LogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, LogFile),
+ LogUpgradeFile = "rabbit_default_upgrade.log",
+ application:set_env(rabbit, lager_upgrade_file, LogUpgradeFile),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = default_expected_handlers(LogFile),
+ LogRoot = application:get_env(lager, log_root, undefined),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))),
+
+ ExpectedSinks = default_expected_sinks(LogUpgradeFile),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+default_expected_handlers(File) ->
+ default_expected_handlers(File, debug, 0, "").
+default_expected_handlers(File, Level) ->
+ default_expected_handlers(File, Level, 0, "").
+default_expected_handlers(File, Level, RotSize, RotDate) ->
+ default_expected_handlers(File, Level, RotSize, RotDate, []).
+default_expected_handlers(File, Level, RotSize, RotDate, Extra) ->
+ [{lager_file_backend,
+ [{date, RotDate},
+ {file, File},
+ {formatter_config, formatter_config(file)},
+ {level, Level},
+ {size, RotSize}] ++ Extra}].
+
+default_expected_sinks(UpgradeFile) ->
+ [{error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,
+ [{lager_file_backend,
+ [{date,[]},
+ {file, UpgradeFile},
+ {formatter_config, formatter_config(file)},
+ {level,info},
+ {size,0}]}]},
+ {rabbit_handlers,
+ [{lager_file_backend,
+ [{date,[]},
+ {file, UpgradeFile},
+ {formatter_config, formatter_config(file)},
+ {level,info},
+ {size,0}]}]}]}].
+
+env_var_tty(_) ->
+ application:set_env(rabbit, lager_log_root, "/tmp/log_base"),
+ application:set_env(rabbit, lager_default_file, tty),
+ application:set_env(rabbit, lager_upgrade_file, tty),
+ %% tty can only be set explicitly
+ set_logs_var_origin(environment),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = tty_expected_handlers(),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))),
+
+ %% Upgrade sink will be different.
+ ExpectedSinks = tty_expected_sinks(),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
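+%% Record where the main log file setting originated in the prelaunch context; tests
+%% use this to simulate a value supplied through an environment variable.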
+set_logs_var_origin(Origin) ->
+ Context = #{var_origins => #{main_log_file => Origin}},
+ rabbit_prelaunch:store_context(Context),
+ ok.
+
+unset_logs_var_origin() ->
+ rabbit_prelaunch:clear_context_cache(),
+ ok.
+
+tty_expected_handlers() ->
+ [{lager_console_backend,
+ [{formatter_config, formatter_config(console)},
+ {level, debug}]}].
+
+tty_expected_sinks() ->
+ [{error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers, [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}].
+
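+%% Sort sinks and their handler lists so that the assertions above are not
+%% sensitive to ordering.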
+sort_sinks(Sinks) ->
+ lists:ukeysort(1,
+ lists:map(
+ fun({Name, Config}) ->
+ Handlers = proplists:get_value(handlers, Config),
+ RabbitHandlers = proplists:get_value(rabbit_handlers, Config),
+ {Name, lists:ukeymerge(1,
+ [{handlers, sort_handlers(Handlers)},
+ {rabbit_handlers, sort_handlers(RabbitHandlers)}],
+ lists:ukeysort(1, Config))}
+ end,
+ Sinks)).
+
+sort_handlers(Handlers) ->
+ lists:keysort(1,
+ lists:map(
+ fun
+ ({Name, [{Atom, _}|_] = Config}) when is_atom(Atom) ->
+ {Name, lists:ukeysort(1, Config)};
+            %% Non-proplist configuration, e.g. the lager_forwarder_backend
+ (Other) ->
+ Other
+ end,
+ Handlers)).
+
+formatter_config(console) ->
+ [date," ",time," ",color,"[",severity, "] ", {pid,[]}, " ",message,"\r\n"];
+formatter_config(_) ->
+ [date," ",time," ",color,"[",severity, "] ", {pid,[]}, " ",message,"\n"].
+
+syslog_formatter_config() ->
+ [color,"[",severity, "] ", {pid,[]}, " ",message,"\n"].
diff --git a/deps/rabbit/test/unit_log_management_SUITE.erl b/deps/rabbit/test/unit_log_management_SUITE.erl
new file mode 100644
index 0000000000..9fc9c7839d
--- /dev/null
+++ b/deps/rabbit/test/unit_log_management_SUITE.erl
@@ -0,0 +1,413 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_log_management_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ log_management,
+ log_file_initialised_during_startup,
+ log_file_fails_to_initialise_during_startup,
+ externally_rotated_logs_are_automatically_reopened
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Application management.
+%% -------------------------------------------------------------------
+
+app_management(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, app_management1, [Config]).
+
+app_management1(_Config) ->
+ wait_for_application(rabbit),
+ %% Starting, stopping and diagnostics. Note that we don't try
+ %% 'report' when the rabbit app is stopped and that we enable
+ %% tracing for the duration of this function.
+ ok = rabbit_trace:start(<<"/">>),
+ ok = rabbit:stop(),
+ ok = rabbit:stop(),
+ ok = no_exceptions(rabbit, status, []),
+ ok = no_exceptions(rabbit, environment, []),
+ ok = rabbit:start(),
+ ok = rabbit:start(),
+ ok = no_exceptions(rabbit, status, []),
+ ok = no_exceptions(rabbit, environment, []),
+ ok = rabbit_trace:stop(<<"/">>),
+ passed.
+
+no_exceptions(Mod, Fun, Args) ->
+ try erlang:apply(Mod, Fun, Args) of _ -> ok
+ catch Type:Ex -> {Type, Ex}
+ end.
+
+wait_for_application(Application) ->
+ wait_for_application(Application, 5000).
+
+wait_for_application(_, Time) when Time =< 0 ->
+ {error, timeout};
+wait_for_application(Application, Time) ->
+ Interval = 100,
+ case lists:keyfind(Application, 1, application:which_applications()) of
+ false ->
+ timer:sleep(Interval),
+ wait_for_application(Application, Time - Interval);
+ _ -> ok
+ end.
+
+
+
+%% -------------------------------------------------------------------
+%% Log management.
+%% -------------------------------------------------------------------
+
+log_management(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, log_management1, [Config]).
+
+log_management1(_Config) ->
+ [LogFile|_] = rabbit:log_locations(),
+ Suffix = ".0",
+
+ ok = test_logs_working([LogFile]),
+
+ %% prepare basic logs
+ file:delete(LogFile ++ Suffix),
+ ok = test_logs_working([LogFile]),
+
+ %% simple log rotation
+ ok = rabbit:rotate_logs(),
+ %% rabbit:rotate_logs/0 is asynchronous due to a limitation in
+ %% Lager. Therefore, we have no choice but to wait an arbitrary
+ %% amount of time.
+ ok = rabbit_ct_helpers:await_condition(
+ fun() ->
+ [true, true] =:=
+ non_empty_files([LogFile ++ Suffix, LogFile])
+ end, 5000),
+ ok = test_logs_working([LogFile]),
+
+ %% log rotation on empty files
+ ok = clean_logs([LogFile], Suffix),
+ ok = rabbit:rotate_logs(),
+ ok = rabbit_ct_helpers:await_condition(
+ fun() ->
+ [true, true] =:=
+ non_empty_files([LogFile ++ Suffix, LogFile])
+ end, 5000),
+
+ %% logs with suffix are not writable
+ ok = rabbit:rotate_logs(),
+ ok = rabbit_ct_helpers:await_condition(
+ fun() ->
+ ok =:= make_files_non_writable([LogFile ++ Suffix])
+ end, 5000),
+ ok = rabbit:rotate_logs(),
+ ok = rabbit_ct_helpers:await_condition(
+ fun() ->
+ ok =:= test_logs_working([LogFile])
+ end, 5000),
+
+ %% rotate when original log files are not writable
+ ok = make_files_non_writable([LogFile]),
+ ok = rabbit:rotate_logs(),
+ timer:sleep(2000),
+
+ %% logging directed to tty (first, remove handlers)
+ ok = rabbit:stop(),
+ ok = make_files_writable([LogFile ++ Suffix]),
+ ok = clean_logs([LogFile], Suffix),
+ ok = application:set_env(rabbit, lager_default_file, tty),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+ timer:sleep(200),
+ rabbit_log:info("test info"),
+
+ %% rotate logs when logging is turned off
+ ok = rabbit:stop(),
+ ok = clean_logs([LogFile], Suffix),
+ ok = application:set_env(rabbit, lager_default_file, false),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+ timer:sleep(200),
+ rabbit_log:error("test error"),
+ timer:sleep(200),
+ ?assertEqual([{error,enoent}], empty_files([LogFile])),
+
+ %% cleanup
+ ok = rabbit:stop(),
+ ok = clean_logs([LogFile], Suffix),
+ ok = application:set_env(rabbit, lager_default_file, LogFile),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+ ok = test_logs_working([LogFile]),
+ passed.
+
+log_file_initialised_during_startup(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, log_file_initialised_during_startup1, [Config]).
+
+log_file_initialised_during_startup1(_Config) ->
+ [LogFile|_] = rabbit:log_locations(),
+ Suffix = ".0",
+
+ %% start application with simple tty logging
+ ok = rabbit:stop(),
+ ok = clean_logs([LogFile], Suffix),
+ ok = application:set_env(rabbit, lager_default_file, tty),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+
+ %% start application with logging to non-existing directory
+ NonExistent = rabbit_misc:format(
+ "/tmp/non-existent/~s.log", [?FUNCTION_NAME]),
+ delete_file(NonExistent),
+ delete_file(filename:dirname(NonExistent)),
+ ok = rabbit:stop(),
+ ct:pal("Setting lager_default_file to \"~s\"", [NonExistent]),
+ ok = application:set_env(rabbit, lager_default_file, NonExistent),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+
+ %% clean up
+ ok = application:set_env(rabbit, lager_default_file, LogFile),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+ passed.
+
+
+log_file_fails_to_initialise_during_startup(Config) ->
+ NonWritableDir = case os:type() of
+ {win32, _} -> "C:/Windows";
+ _ -> "/"
+ end,
+ case file:open(filename:join(NonWritableDir, "test.log"), [write]) of
+ {error, eacces} ->
+ passed = rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ ?MODULE, log_file_fails_to_initialise_during_startup1,
+ [Config, NonWritableDir]);
+ %% macOS, "read only volume"
+ {error, erofs} ->
+ passed = rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ ?MODULE, log_file_fails_to_initialise_during_startup1,
+ [Config, NonWritableDir]);
+ {ok, Fd} ->
+ %% If the supposedly non-writable directory is writable
+ %% (e.g. we are running the testsuite on Windows as
+ %% Administrator), we skip this test.
+ file:close(Fd),
+ {skip, "Supposedly non-writable directory is writable"}
+ end.
+
+log_file_fails_to_initialise_during_startup1(_Config, NonWritableDir) ->
+ [LogFile|_] = rabbit:log_locations(),
+ delete_file(LogFile),
+ Fn = rabbit_misc:format("~s.log", [?FUNCTION_NAME]),
+
+ %% start application with logging to directory with no
+ %% write permissions
+ NoPermission1 = filename:join(NonWritableDir, Fn),
+ delete_file(NoPermission1),
+ delete_file(filename:dirname(NoPermission1)),
+
+ ok = rabbit:stop(),
+ ct:pal("Setting lager_default_file to \"~s\"", [NoPermission1]),
+ ok = application:set_env(rabbit, lager_default_file, NoPermission1),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+
+ ct:pal("`rabbit` application env.: ~p", [application:get_all_env(rabbit)]),
+ ?assertThrow(
+ {error, {rabbit, {{cannot_log_to_file, _, _}, _}}},
+ rabbit:start()),
+
+ %% start application with logging to a subdirectory which
+ %% parent directory has no write permissions
+ NoPermission2 = filename:join([NonWritableDir,
+ "non-existent",
+ Fn]),
+ delete_file(NoPermission2),
+ delete_file(filename:dirname(NoPermission2)),
+
+ ct:pal("Setting lager_default_file to \"~s\"", [NoPermission2]),
+ ok = application:set_env(rabbit, lager_default_file, NoPermission2),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+
+ ct:pal("`rabbit` application env.: ~p", [application:get_all_env(rabbit)]),
+ ?assertThrow(
+ {error, {rabbit, {{cannot_log_to_file, _, _}, _}}},
+ rabbit:start()),
+
+ %% clean up
+ ok = application:set_env(rabbit, lager_default_file, LogFile),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+ passed.
+
+externally_rotated_logs_are_automatically_reopened(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, externally_rotated_logs_are_automatically_reopened1, [Config]).
+
+externally_rotated_logs_are_automatically_reopened1(_Config) ->
+ [LogFile|_] = rabbit:log_locations(),
+
+ %% Make sure log file is opened
+ ok = test_logs_working([LogFile]),
+
+ %% Move it away - i.e. external log rotation happened
+ file:rename(LogFile, [LogFile, ".rotation_test"]),
+
+ %% New files should be created - test_logs_working/1 will check that
+ %% LogFile is not empty after doing some logging. And it's exactly
+ %% what we need to check here.
+ ok = test_logs_working([LogFile]),
+ passed.
+
+empty_or_nonexist_files(Files) ->
+ [case file:read_file_info(File) of
+ {ok, FInfo} -> FInfo#file_info.size == 0;
+ {error, enoent} -> true;
+ Error -> Error
+ end || File <- Files].
+
+empty_files(Files) ->
+ [case file:read_file_info(File) of
+ {ok, FInfo} -> FInfo#file_info.size == 0;
+ Error -> Error
+ end || File <- Files].
+
+non_empty_files(Files) ->
+ [case EmptyFile of
+ {error, Reason} -> {error, Reason};
+ _ -> not(EmptyFile)
+ end || EmptyFile <- empty_files(Files)].
+
+test_logs_working(LogFiles) ->
+ ok = rabbit_log:error("Log a test message"),
+ %% give the error loggers some time to catch up
+ timer:sleep(1000),
+ lists:all(fun(LogFile) -> [true] =:= non_empty_files([LogFile]) end, LogFiles),
+ ok.
+
+set_permissions(Path, Mode) ->
+ case file:read_file_info(Path) of
+ {ok, FInfo} -> file:write_file_info(
+ Path,
+ FInfo#file_info{mode=Mode});
+ Error -> Error
+ end.
+
+clean_logs(Files, Suffix) ->
+ [begin
+ ok = delete_file(File),
+ ok = delete_file([File, Suffix])
+ end || File <- Files],
+ ok.
+
+delete_file(File) ->
+ case file:delete(File) of
+ ok -> ok;
+ {error, enoent} -> ok;
+ Error -> Error
+ end.
+
+make_files_writable(Files) ->
+ [ok = file:write_file_info(File, #file_info{mode=8#644}) ||
+ File <- Files],
+ ok.
+
+make_files_non_writable(Files) ->
+ [ok = file:write_file_info(File, #file_info{mode=8#444}) ||
+ File <- Files],
+ ok.
+
+add_log_handlers(Handlers) ->
+ [ok = error_logger:add_report_handler(Handler, Args) ||
+ {Handler, Args} <- Handlers],
+ ok.
+
+%% sasl_report_file_h returns [] during terminate
+%% see: https://github.com/erlang/otp/blob/maint/lib/stdlib/src/error_logger_file_h.erl#L98
+%%
+%% error_logger_file_h returns ok since OTP 18.1
+%% see: https://github.com/erlang/otp/blob/maint/lib/stdlib/src/error_logger_file_h.erl#L98
+delete_log_handlers(Handlers) ->
+ [ok_or_empty_list(error_logger:delete_report_handler(Handler))
+ || Handler <- Handlers],
+ ok.
+
+ok_or_empty_list([]) ->
+ [];
+ok_or_empty_list(ok) ->
+ ok.
diff --git a/deps/rabbit/test/unit_operator_policy_SUITE.erl b/deps/rabbit/test/unit_operator_policy_SUITE.erl
new file mode 100644
index 0000000000..ae3285bb55
--- /dev/null
+++ b/deps/rabbit/test/unit_operator_policy_SUITE.erl
@@ -0,0 +1,107 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_operator_policy_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ merge_operator_policy_definitions
+ ]}
+ ].
+
+init_per_testcase(_Testcase, Config) ->
+ Config.
+
+end_per_testcase(_TC, _Config) ->
+ ok.
+
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+merge_operator_policy_definitions(_Config) ->
+ P1 = undefined,
+ P2 = [{definition, [{<<"message-ttl">>, 3000}]}],
+ ?assertEqual([{<<"message-ttl">>, 3000}], rabbit_policy:merge_operator_definitions(P1, P2)),
+ ?assertEqual([{<<"message-ttl">>, 3000}], rabbit_policy:merge_operator_definitions(P2, P1)),
+
+ ?assertEqual([{<<"message-ttl">>, 3000}], rabbit_policy:merge_operator_definitions(P1, rabbit_data_coercion:to_map(P2))),
+ ?assertEqual([{<<"message-ttl">>, 3000}], rabbit_policy:merge_operator_definitions(rabbit_data_coercion:to_map(P2), P1)),
+
+ ?assertEqual(undefined, rabbit_policy:merge_operator_definitions(undefined, undefined)),
+
+ ?assertEqual([], rabbit_policy:merge_operator_definitions([], [])),
+ ?assertEqual([], rabbit_policy:merge_operator_definitions(#{}, [])),
+ ?assertEqual([], rabbit_policy:merge_operator_definitions(#{}, #{})),
+ ?assertEqual([], rabbit_policy:merge_operator_definitions([], #{})),
+
+ %% operator policy takes precedence
+ ?assertEqual([{<<"message-ttl">>, 3000}], rabbit_policy:merge_operator_definitions(
+ [{definition, [
+ {<<"message-ttl">>, 5000}
+ ]}],
+ [{definition, [
+ {<<"message-ttl">>, 3000}
+ ]}]
+ )),
+
+ ?assertEqual([{<<"delivery-limit">>, 20},
+ {<<"message-ttl">>, 3000}],
+ rabbit_policy:merge_operator_definitions(
+ [{definition, [
+ {<<"message-ttl">>, 5000},
+ {<<"delivery-limit">>, 20}
+ ]}],
+ [{definition, [
+ {<<"message-ttl">>, 3000}
+ ]}])
+ ),
+
+ ?assertEqual(
+ [{<<"delivery-limit">>, 20},
+ {<<"message-ttl">>, 3000},
+ {<<"unknown">>, <<"value">>}],
+
+ rabbit_policy:merge_operator_definitions(
+ #{definition => #{
+ <<"message-ttl">> => 5000,
+ <<"delivery-limit">> => 20
+ }},
+ #{definition => #{
+ <<"message-ttl">> => 3000,
+ <<"unknown">> => <<"value">>
+ }})
+ ),
+
+ ?assertEqual(
+ [{<<"delivery-limit">>, 20},
+ {<<"message-ttl">>, 3000}],
+
+ rabbit_policy:merge_operator_definitions(
+ #{definition => #{
+ <<"message-ttl">> => 5000,
+ <<"delivery-limit">> => 20
+ }},
+ [{definition, [
+ {<<"message-ttl">>, 3000}
+ ]}])
+ ),
+
+ passed.
diff --git a/deps/rabbit/test/unit_pg_local_SUITE.erl b/deps/rabbit/test/unit_pg_local_SUITE.erl
new file mode 100644
index 0000000000..54fafdd340
--- /dev/null
+++ b/deps/rabbit/test/unit_pg_local_SUITE.erl
@@ -0,0 +1,103 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_pg_local_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ pg_local,
+ pg_local_with_unexpected_deaths1,
+ pg_local_with_unexpected_deaths2
+ ]}
+ ].
+
+
+pg_local(_Config) ->
+ [P, Q] = [spawn(fun () -> receive X -> X end end) || _ <- lists:seq(0, 1)],
+ check_pg_local(ok, [], []),
+ %% P joins group a, then b, then a again
+ check_pg_local(pg_local:join(a, P), [P], []),
+ check_pg_local(pg_local:join(b, P), [P], [P]),
+ check_pg_local(pg_local:join(a, P), [P, P], [P]),
+ %% Q joins group a, then b, then b again
+ check_pg_local(pg_local:join(a, Q), [P, P, Q], [P]),
+ check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q]),
+ check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q, Q]),
+    %% P leaves groups a and b
+ check_pg_local(pg_local:leave(a, P), [P, Q], [P, Q, Q]),
+ check_pg_local(pg_local:leave(b, P), [P, Q], [Q, Q]),
+ %% leave/2 is idempotent
+ check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
+ check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
+ %% clean up all processes
+ [begin X ! done,
+ Ref = erlang:monitor(process, X),
+ receive {'DOWN', Ref, process, X, _Info} -> ok end
+ end || X <- [P, Q]],
+ %% ensure the groups are empty
+ check_pg_local(ok, [], []),
+ passed.
+
+pg_local_with_unexpected_deaths1(_Config) ->
+ [A, B] = [spawn(fun () -> receive X -> X end end) || _ <- lists:seq(0, 1)],
+ check_pg_local(ok, [], []),
+ %% A joins groups a and b
+ check_pg_local(pg_local:join(a, A), [A], []),
+ check_pg_local(pg_local:join(b, A), [A], [A]),
+ %% B joins group b
+ check_pg_local(pg_local:join(b, B), [A], [A, B]),
+
+ [begin erlang:exit(X, sleep_now_in_a_fire),
+ Ref = erlang:monitor(process, X),
+ receive {'DOWN', Ref, process, X, _Info} -> ok end
+ end || X <- [A, B]],
+ %% ensure the groups are empty
+ check_pg_local(ok, [], []),
+ ?assertNot(erlang:is_process_alive(A)),
+ ?assertNot(erlang:is_process_alive(B)),
+
+ passed.
+
+pg_local_with_unexpected_deaths2(_Config) ->
+ [A, B] = [spawn(fun () -> receive X -> X end end) || _ <- lists:seq(0, 1)],
+ check_pg_local(ok, [], []),
+ %% A joins groups a and b
+ check_pg_local(pg_local:join(a, A), [A], []),
+ check_pg_local(pg_local:join(b, A), [A], [A]),
+ %% B joins group b
+ check_pg_local(pg_local:join(b, B), [A], [A, B]),
+
+ %% something else yanks a record (or all of them) from the pg_local
+ %% bookkeeping table
+ ok = pg_local:clear(),
+
+ [begin erlang:exit(X, sleep_now_in_a_fire),
+ Ref = erlang:monitor(process, X),
+ receive {'DOWN', Ref, process, X, _Info} -> ok end
+ end || X <- [A, B]],
+ %% ensure the groups are empty
+ check_pg_local(ok, [], []),
+ ?assertNot(erlang:is_process_alive(A)),
+ ?assertNot(erlang:is_process_alive(B)),
+
+ passed.
+
+check_pg_local(ok, APids, BPids) ->
+ ok = pg_local:sync(),
+ ?assertEqual([true, true], [lists:sort(Pids) == lists:sort(pg_local:get_members(Key)) ||
+ {Key, Pids} <- [{a, APids}, {b, BPids}]]).
diff --git a/deps/rabbit/test/unit_plugin_directories_SUITE.erl b/deps/rabbit/test/unit_plugin_directories_SUITE.erl
new file mode 100644
index 0000000000..1195434fae
--- /dev/null
+++ b/deps/rabbit/test/unit_plugin_directories_SUITE.erl
@@ -0,0 +1,76 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_plugin_directories_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ listing_plugins_from_multiple_directories
+ ]}
+ ].
+
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+listing_plugins_from_multiple_directories(Config) ->
+ %% Generate some fake plugins in .ez files
+ FirstDir = filename:join([?config(priv_dir, Config), "listing_plugins_from_multiple_directories-1"]),
+ SecondDir = filename:join([?config(priv_dir, Config), "listing_plugins_from_multiple_directories-2"]),
+ ok = file:make_dir(FirstDir),
+ ok = file:make_dir(SecondDir),
+ lists:foreach(fun({Dir, AppName, Vsn}) ->
+ EzName = filename:join([Dir, io_lib:format("~s-~s.ez", [AppName, Vsn])]),
+ AppFileName = lists:flatten(io_lib:format("~s-~s/ebin/~s.app", [AppName, Vsn, AppName])),
+ AppFileContents = list_to_binary(
+ io_lib:format(
+ "~p.",
+ [{application, AppName,
+ [{vsn, Vsn},
+ {applications, [kernel, stdlib, rabbit]}]}])),
+ {ok, {_, EzData}} = zip:zip(EzName, [{AppFileName, AppFileContents}], [memory]),
+ ok = file:write_file(EzName, EzData)
+ end,
+ [{FirstDir, plugin_first_dir, "3"},
+ {SecondDir, plugin_second_dir, "4"},
+ {FirstDir, plugin_both, "1"},
+ {SecondDir, plugin_both, "2"}]),
+
+ %% Everything was collected from both directories, plugin with higher
+ %% version should take precedence
+ PathSep = case os:type() of
+ {win32, _} -> ";";
+ _ -> ":"
+ end,
+ Path = FirstDir ++ PathSep ++ SecondDir,
+ Got = lists:sort([{Name, Vsn} || #plugin{name = Name, version = Vsn} <- rabbit_plugins:list(Path)]),
+ %% `rabbit` was loaded automatically by `rabbit_plugins:list/1`.
+ %% We want to unload it now so it does not interfere with other
+ %% testcases.
+ application:unload(rabbit),
+ Expected = [{plugin_both, "2"}, {plugin_first_dir, "3"}, {plugin_second_dir, "4"}],
+ case Got of
+ Expected ->
+ ok;
+ _ ->
+ ct:pal("Got ~p~nExpected: ~p", [Got, Expected]),
+ exit({wrong_plugins_list, Got})
+ end,
+ ok.
diff --git a/deps/rabbit/test/unit_plugin_versioning_SUITE.erl b/deps/rabbit/test/unit_plugin_versioning_SUITE.erl
new file mode 100644
index 0000000000..8032becedd
--- /dev/null
+++ b/deps/rabbit/test/unit_plugin_versioning_SUITE.erl
@@ -0,0 +1,170 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_plugin_versioning_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ version_support,
+ plugin_validation
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+version_support(_Config) ->
+ Examples = [
+ {[], "any version", true} %% anything goes
+ ,{[], "0.0.0", true} %% ditto
+ ,{[], "3.5.6", true} %% ditto
+ ,{["something"], "something", true} %% equal values match
+ ,{["3.5.4"], "something", false}
+ ,{["3.4.5", "3.6.0"], "0.0.0", true} %% zero version always match
+ ,{["3.4.5", "3.6.0"], "", true} %% empty version always match
+ ,{["something", "3.5.6"], "3.5.7", true} %% 3.5.7 matches ~> 3.5.6
+ ,{["3.4.0", "3.5.6"], "3.6.1", false} %% 3.6.x isn't supported
+ ,{["3.5.2", "3.6.1", "3.7.1"], "3.5.2", true} %% 3.5.2 matches ~> 3.5.2
+ ,{["3.5.2", "3.6.1", "3.7.1"], "3.5.1", false} %% lesser than the lower boundary
+ ,{["3.5.2", "3.6.1", "3.7.1"], "3.6.2", true} %% 3.6.2 matches ~> 3.6.1
+ ,{["3.5.2", "3.6.1", "3.6.8"], "3.6.2", true} %% 3.6.2 still matches ~> 3.6.1
+ ,{["3.5", "3.6", "3.7"], "3.5.1", true} %% x.y values equal to x.y.0
+ ,{["3"], "3.5.1", false} %% x values are not supported
+ ,{["3.5.2", "3.6.1"], "3.6.2.999", true} %% x.y.z.p values are supported
+ ,{["3.5.2", "3.6.2.333"], "3.6.2.999", true} %% x.y.z.p values are supported
+ ,{["3.5.2", "3.6.2.333"], "3.6.2.222", false} %% x.y.z.p values are supported
+ ,{["3.6.0", "3.7.0"], "3.6.3-alpha.1", true} %% Pre-release versions handled like semver part
+ ,{["3.6.0", "3.7.0"], "3.7.0-alpha.89", true}
+ ],
+
+ lists:foreach(
+ fun({Versions, RabbitVersion, Expected}) ->
+ {Expected, RabbitVersion, Versions} =
+ {rabbit_plugins:is_version_supported(RabbitVersion, Versions),
+ RabbitVersion, Versions}
+ end,
+ Examples),
+ ok.
+
+-record(validation_example, {rabbit_version, plugins, errors, valid}).
+
+plugin_validation(_Config) ->
+ Examples = [
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.2", ["3.5.6", "3.7.1"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.6.3", "3.7.1"]}]}],
+ errors = [],
+ valid = [plugin_a, plugin_b]},
+
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.1", ["3.7.6"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.6.3", "3.7.0"]}]}],
+ errors =
+ [{plugin_a, [{broker_version_mismatch, "3.7.1", ["3.7.6"]}]},
+ {plugin_b, [{missing_dependency, plugin_a}]}],
+ valid = []
+ },
+
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.1", ["3.7.6"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.0"]}]},
+ {plugin_c, "3.7.2", ["3.7.0"], [{plugin_b, ["3.7.3"]}]}],
+ errors =
+ [{plugin_a, [{broker_version_mismatch, "3.7.1", ["3.7.6"]}]},
+ {plugin_b, [{missing_dependency, plugin_a}]},
+ {plugin_c, [{missing_dependency, plugin_b}]}],
+ valid = []
+ },
+
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.1", ["3.7.1"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.3"]}]},
+ {plugin_d, "3.7.2", ["3.7.0"], [{plugin_c, ["3.7.3"]}]}],
+ errors =
+ [{plugin_b, [{{dependency_version_mismatch, "3.7.1", ["3.7.3"]}, plugin_a}]},
+ {plugin_d, [{missing_dependency, plugin_c}]}],
+ valid = [plugin_a]
+ },
+ #validation_example{
+ rabbit_version = "0.0.0",
+ plugins =
+ [{plugin_a, "", ["3.7.1"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.3"]}]}],
+ errors = [],
+ valid = [plugin_a, plugin_b]
+ }],
+ lists:foreach(
+ fun(#validation_example{rabbit_version = RabbitVersion,
+ plugins = PluginsExamples,
+ errors = Errors,
+ valid = ExpectedValid}) ->
+ Plugins = make_plugins(PluginsExamples),
+ {Valid, Invalid} = rabbit_plugins:validate_plugins(Plugins,
+ RabbitVersion),
+ Errors = lists:reverse(Invalid),
+ ExpectedValid = lists:reverse(lists:map(fun(#plugin{name = Name}) ->
+ Name
+ end,
+ Valid))
+ end,
+ Examples),
+ ok.
+
+make_plugins(Plugins) ->
+ lists:map(
+ fun({Name, Version, RabbitVersions, PluginsVersions}) ->
+ Deps = [K || {K,_V} <- PluginsVersions],
+ #plugin{name = Name,
+ version = Version,
+ dependencies = Deps,
+ broker_version_requirements = RabbitVersions,
+ dependency_version_requirements = PluginsVersions}
+ end,
+ Plugins).
diff --git a/deps/rabbit/test/unit_policy_validators_SUITE.erl b/deps/rabbit/test/unit_policy_validators_SUITE.erl
new file mode 100644
index 0000000000..c340d172af
--- /dev/null
+++ b/deps/rabbit/test/unit_policy_validators_SUITE.erl
@@ -0,0 +1,207 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_policy_validators_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, core_validators},
+ {group, classic_queue_mirroring_validators}
+ ].
+
+groups() ->
+ [
+ {core_validators, [parallel], [
+ alternate_exchange,
+ dead_letter_exchange,
+ dead_letter_routing_key,
+ message_ttl,
+ expires,
+ max_length,
+ max_length_bytes,
+ max_in_memory_length,
+ delivery_limit,
+ classic_queue_lazy_mode,
+ length_limit_overflow_mode
+ ]},
+
+ {classic_queue_mirroring_validators, [parallel], [
+ classic_queue_ha_mode,
+ classic_queue_ha_params
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group = classic_queue_mirroring_validators, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps());
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(classic_queue_mirroring_validators, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps());
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Core Validators
+%% -------------------------------------------------------------------
+
+alternate_exchange(_Config) ->
+ requires_binary_value(<<"alternate-exchange">>).
+
+dead_letter_exchange(_Config) ->
+ requires_binary_value(<<"dead-letter-exchange">>).
+
+dead_letter_routing_key(_Config) ->
+ requires_binary_value(<<"dead-letter-routing-key">>).
+
+message_ttl(_Config) ->
+ requires_non_negative_integer_value(<<"message-ttl">>).
+
+expires(_Config) ->
+ requires_positive_integer_value(<<"expires">>).
+
+max_length(_Config) ->
+ requires_non_negative_integer_value(<<"max-length">>).
+
+max_length_bytes(_Config) ->
+ requires_non_negative_integer_value(<<"max-length-bytes">>).
+
+max_in_memory_length(_Config) ->
+ requires_non_negative_integer_value(<<"max-in-memory-bytes">>).
+
+delivery_limit(_Config) ->
+ requires_non_negative_integer_value(<<"delivery-limit">>).
+
+classic_queue_lazy_mode(_Config) ->
+ test_valid_and_invalid_values(<<"queue-mode">>,
+ %% valid values
+ [<<"default">>, <<"lazy">>],
+ %% invalid values
+ [<<"unknown">>, <<"queue">>, <<"mode">>]).
+
+length_limit_overflow_mode(_Config) ->
+ test_valid_and_invalid_values(<<"overflow">>,
+ %% valid values
+ [<<"drop-head">>, <<"reject-publish">>, <<"reject-publish-dlx">>],
+ %% invalid values
+ [<<"unknown">>, <<"publish">>, <<"overflow">>, <<"mode">>]).
+
+
+%% -------------------------------------------------------------------
+%% CMQ Validators
+%% -------------------------------------------------------------------
+
+classic_queue_ha_mode(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, classic_queue_ha_mode1, [Config]).
+
+classic_queue_ha_mode1(_Config) ->
+ ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"exactly">>},
+ {<<"ha-params">>, 2}
+ ])),
+
+ ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"nodes">>},
+ {<<"ha-params">>, [<<"rabbit@host1">>, <<"rabbit@host2">>]}
+ ])),
+
+ ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"all">>}
+ ])),
+
+ ?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"lolwut">>},
+ {<<"ha-params">>, 2}
+ ])).
+
+classic_queue_ha_params(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+        ?MODULE, classic_queue_ha_params1, [Config]).
+
+classic_queue_ha_params1(_Config) ->
+ ?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"exactly">>},
+ {<<"ha-params">>, <<"2">>}
+ ])),
+
+ ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"nodes">>},
+ {<<"ha-params">>, <<"lolwut">>}
+ ])),
+
+ ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"all">>},
+ {<<"ha-params">>, <<"lolwut">>}
+ ])),
+
+ ?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"lolwut">>},
+ {<<"ha-params">>, 2}
+ ])).
+
+%%
+%% Implementation
+%%
+
+test_valid_and_invalid_values(Mod, Key, ValidValues, InvalidValues) ->
+ [begin
+ ?assertEqual(ok, Mod:validate_policy([
+ {Key, Val}
+ ]))
+ end || Val <- ValidValues],
+ [begin
+ ?assertMatch({error, _, _}, Mod:validate_policy([
+ {Key, Val}
+ ]))
+ end || Val <- InvalidValues].
+
+test_valid_and_invalid_values(Key, ValidValues, InvalidValues) ->
+ test_valid_and_invalid_values(rabbit_policies, Key, ValidValues, InvalidValues).
+
+requires_binary_value(Key) ->
+ test_valid_and_invalid_values(Key,
+ [<<"a.binary">>, <<"b.binary">>],
+ [1, rabbit]).
+
+requires_positive_integer_value(Key) ->
+ test_valid_and_invalid_values(Key,
+ [1, 1000],
+ [0, -1, <<"a.binary">>]).
+
+requires_non_negative_integer_value(Key) ->
+ test_valid_and_invalid_values(Key,
+ [0, 1, 1000],
+ [-1000, -1, <<"a.binary">>]).
diff --git a/deps/rabbit/test/unit_priority_queue_SUITE.erl b/deps/rabbit/test/unit_priority_queue_SUITE.erl
new file mode 100644
index 0000000000..5587e7d61f
--- /dev/null
+++ b/deps/rabbit/test/unit_priority_queue_SUITE.erl
@@ -0,0 +1,184 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_priority_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ priority_queue
+ ]}
+ ].
+
+
+priority_queue(_Config) ->
+
+ false = priority_queue:is_queue(not_a_queue),
+
+ %% empty Q
+ Q = priority_queue:new(),
+ {true, true, 0, [], []} = test_priority_queue(Q),
+
+ %% 1-4 element no-priority Q
+ true = lists:all(fun (X) -> X =:= passed end,
+ lists:map(fun test_simple_n_element_queue/1,
+ lists:seq(1, 4))),
+
+ %% 1-element priority Q
+ Q1 = priority_queue:in(foo, 1, priority_queue:new()),
+ {true, false, 1, [{1, foo}], [foo]} =
+ test_priority_queue(Q1),
+
+ %% 2-element same-priority Q
+ Q2 = priority_queue:in(bar, 1, Q1),
+ {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
+ test_priority_queue(Q2),
+
+ %% 2-element different-priority Q
+ Q3 = priority_queue:in(bar, 2, Q1),
+ {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+ test_priority_queue(Q3),
+
+ %% 1-element negative priority Q
+ Q4 = priority_queue:in(foo, -1, priority_queue:new()),
+ {true, false, 1, [{-1, foo}], [foo]} = test_priority_queue(Q4),
+
+ %% merge 2 * 1-element no-priority Qs
+ Q5 = priority_queue:join(priority_queue:in(foo, Q),
+ priority_queue:in(bar, Q)),
+ {true, false, 2, [{0, foo}, {0, bar}], [foo, bar]} =
+ test_priority_queue(Q5),
+
+ %% merge 1-element no-priority Q with 1-element priority Q
+ Q6 = priority_queue:join(priority_queue:in(foo, Q),
+ priority_queue:in(bar, 1, Q)),
+ {true, false, 2, [{1, bar}, {0, foo}], [bar, foo]} =
+ test_priority_queue(Q6),
+
+ %% merge 1-element priority Q with 1-element no-priority Q
+ Q7 = priority_queue:join(priority_queue:in(foo, 1, Q),
+ priority_queue:in(bar, Q)),
+ {true, false, 2, [{1, foo}, {0, bar}], [foo, bar]} =
+ test_priority_queue(Q7),
+
+ %% merge 2 * 1-element same-priority Qs
+ Q8 = priority_queue:join(priority_queue:in(foo, 1, Q),
+ priority_queue:in(bar, 1, Q)),
+ {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
+ test_priority_queue(Q8),
+
+ %% merge 2 * 1-element different-priority Qs
+ Q9 = priority_queue:join(priority_queue:in(foo, 1, Q),
+ priority_queue:in(bar, 2, Q)),
+ {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+ test_priority_queue(Q9),
+
+ %% merge 2 * 1-element different-priority Qs (other way around)
+ Q10 = priority_queue:join(priority_queue:in(bar, 2, Q),
+ priority_queue:in(foo, 1, Q)),
+ {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+ test_priority_queue(Q10),
+
+ %% merge 2 * 2-element multi-different-priority Qs
+ Q11 = priority_queue:join(Q6, Q5),
+ {true, false, 4, [{1, bar}, {0, foo}, {0, foo}, {0, bar}],
+ [bar, foo, foo, bar]} = test_priority_queue(Q11),
+
+ %% and the other way around
+ Q12 = priority_queue:join(Q5, Q6),
+ {true, false, 4, [{1, bar}, {0, foo}, {0, bar}, {0, foo}],
+ [bar, foo, bar, foo]} = test_priority_queue(Q12),
+
+ %% merge with negative priorities
+ Q13 = priority_queue:join(Q4, Q5),
+ {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
+ test_priority_queue(Q13),
+
+ %% and the other way around
+ Q14 = priority_queue:join(Q5, Q4),
+ {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
+ test_priority_queue(Q14),
+
+ %% joins with empty queues:
+ Q1 = priority_queue:join(Q, Q1),
+ Q1 = priority_queue:join(Q1, Q),
+
+ %% insert with priority into non-empty zero-priority queue
+ Q15 = priority_queue:in(baz, 1, Q5),
+ {true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} =
+ test_priority_queue(Q15),
+
+ %% 1-element infinity priority Q
+ Q16 = priority_queue:in(foo, infinity, Q),
+ {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16),
+
+ %% add infinity to 0-priority Q
+ Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)),
+ {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
+ test_priority_queue(Q17),
+
+ %% and the other way around
+ Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)),
+ {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
+ test_priority_queue(Q18),
+
+ %% add infinity to mixed-priority Q
+ Q19 = priority_queue:in(qux, infinity, Q3),
+ {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} =
+ test_priority_queue(Q19),
+
+ %% merge the above with a negative priority Q
+ Q20 = priority_queue:join(Q19, Q4),
+ {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}],
+ [qux, bar, foo, foo]} = test_priority_queue(Q20),
+
+ %% merge two infinity priority queues
+ Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q),
+ priority_queue:in(bar, infinity, Q)),
+ {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} =
+ test_priority_queue(Q21),
+
+ %% merge two mixed priority with infinity queues
+ Q22 = priority_queue:join(Q18, Q20),
+ {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo},
+ {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} =
+ test_priority_queue(Q22),
+
+ passed.
+
+priority_queue_in_all(Q, L) ->
+ lists:foldl(fun (X, Acc) -> priority_queue:in(X, Acc) end, Q, L).
+
+priority_queue_out_all(Q) ->
+ case priority_queue:out(Q) of
+ {empty, _} -> [];
+ {{value, V}, Q1} -> [V | priority_queue_out_all(Q1)]
+ end.
+
+test_priority_queue(Q) ->
+ {priority_queue:is_queue(Q),
+ priority_queue:is_empty(Q),
+ priority_queue:len(Q),
+ priority_queue:to_list(Q),
+ priority_queue_out_all(Q)}.
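+
+%% test_priority_queue/1 returns {IsQueue, IsEmpty, Len, ToList, OutOrder},
+%% which is what the {true, false, N, [...], [...]} patterns above match
+%% against.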
+
+test_simple_n_element_queue(N) ->
+ Items = lists:seq(1, N),
+ Q = priority_queue_in_all(priority_queue:new(), Items),
+ ToListRes = [{0, X} || X <- Items],
+ {true, false, N, ToListRes, Items} = test_priority_queue(Q),
+ passed.
diff --git a/deps/rabbit/test/unit_queue_consumers_SUITE.erl b/deps/rabbit/test/unit_queue_consumers_SUITE.erl
new file mode 100644
index 0000000000..0f48ea65b4
--- /dev/null
+++ b/deps/rabbit/test/unit_queue_consumers_SUITE.erl
@@ -0,0 +1,121 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_queue_consumers_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ is_same,
+ get_consumer,
+ get,
+ list_consumers
+ ].
+
+is_same(_Config) ->
+ ?assertEqual(
+ true,
+ rabbit_queue_consumers:is_same(
+ self(), <<"1">>,
+ consumer(self(), <<"1">>)
+ )),
+ ?assertEqual(
+ false,
+ rabbit_queue_consumers:is_same(
+ self(), <<"1">>,
+ consumer(self(), <<"2">>)
+ )),
+ Pid = spawn(?MODULE, function_for_process, []),
+ Pid ! whatever,
+ ?assertEqual(
+ false,
+ rabbit_queue_consumers:is_same(
+ self(), <<"1">>,
+ consumer(Pid, <<"1">>)
+ )),
+ ok.
+
+get(_Config) ->
+ Pid = spawn(?MODULE, function_for_process, []),
+ Pid ! whatever,
+ State = state(consumers([consumer(self(), <<"1">>), consumer(Pid, <<"2">>), consumer(self(), <<"3">>)])),
+ {Pid, {consumer, <<"2">>, _, _, _, _}} =
+ rabbit_queue_consumers:get(Pid, <<"2">>, State),
+ ?assertEqual(
+ undefined,
+ rabbit_queue_consumers:get(self(), <<"2">>, State)
+ ),
+ ?assertEqual(
+ undefined,
+ rabbit_queue_consumers:get(Pid, <<"1">>, State)
+ ),
+ ok.
+
+get_consumer(_Config) ->
+ Pid = spawn(unit_queue_consumers_SUITE, function_for_process, []),
+ Pid ! whatever,
+ State = state(consumers([consumer(self(), <<"1">>), consumer(Pid, <<"2">>), consumer(self(), <<"3">>)])),
+ {_Pid, {consumer, _, _, _, _, _}} =
+ rabbit_queue_consumers:get_consumer(State),
+ ?assertEqual(
+ undefined,
+ rabbit_queue_consumers:get_consumer(state(consumers([])))
+ ),
+ ok.
+
+list_consumers(_Config) ->
+ State = state(consumers([consumer(self(), <<"1">>), consumer(self(), <<"2">>), consumer(self(), <<"3">>)])),
+ Consumer = rabbit_queue_consumers:get_consumer(State),
+ {_Pid, ConsumerRecord} = Consumer,
+ CTag = rabbit_queue_consumers:consumer_tag(ConsumerRecord),
+ ConsumersWithSingleActive = rabbit_queue_consumers:all(State, Consumer, true),
+ ?assertEqual(3, length(ConsumersWithSingleActive)),
+ lists:foldl(fun({Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ case Tag of
+ CTag ->
+ ?assert(Active),
+ ?assertEqual(single_active, ActivityStatus);
+ _ ->
+ ?assertNot(Active),
+ ?assertEqual(waiting, ActivityStatus)
+ end
+ end, [], ConsumersWithSingleActive),
+ ConsumersNoSingleActive = rabbit_queue_consumers:all(State, none, false),
+ ?assertEqual(3, length(ConsumersNoSingleActive)),
+ lists:foldl(fun({Pid, _, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ ?assert(Active),
+ ?assertEqual(up, ActivityStatus)
+ end, [], ConsumersNoSingleActive),
+ ok.
+
+consumers([]) ->
+ priority_queue:new();
+consumers(Consumers) ->
+ consumers(Consumers, priority_queue:new()).
+
+consumers([H], Q) ->
+ priority_queue:in(H, Q);
+consumers([H | T], Q) ->
+ consumers(T, priority_queue:in(H, Q)).
+
+
+consumer(Pid, ConsumerTag) ->
+ {Pid, {consumer, ConsumerTag, true, 1, [], <<"guest">>}}.
+
+state(Consumers) ->
+ {state, Consumers, {}}.
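+
+%% consumer/2 and state/1 build plain tuples shaped like the internal
+%% #consumer{} and #state{} records of rabbit_queue_consumers; this is
+%% assumed to be enough for the pure functions exercised in this suite.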
+
+function_for_process() ->
+ receive
+ _ -> ok
+ end.
diff --git a/deps/rabbit/test/unit_stats_and_metrics_SUITE.erl b/deps/rabbit/test/unit_stats_and_metrics_SUITE.erl
new file mode 100644
index 0000000000..2ffed514e1
--- /dev/null
+++ b/deps/rabbit/test/unit_stats_and_metrics_SUITE.erl
@@ -0,0 +1,266 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_stats_and_metrics_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ channel_statistics,
+ head_message_timestamp_statistics
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Statistics.
+%% -------------------------------------------------------------------
+
+channel_statistics(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, channel_statistics1, [Config]).
+
+channel_statistics1(_Config) ->
+ application:set_env(rabbit, collect_statistics, fine),
+
+ %% ATM this just tests the queue / exchange stats in channels. That's
+ %% by far the most complex code though.
+
+ %% Set up a channel and queue
+ {_Writer, Ch} = test_spawn(),
+ rabbit_channel:do(Ch, #'queue.declare'{}),
+ QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
+ after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+ end,
+ QRes = rabbit_misc:r(<<"/">>, queue, QName),
+ X = rabbit_misc:r(<<"/">>, exchange, <<"">>),
+
+ dummy_event_receiver:start(self(), [node()], [channel_stats]),
+
+ %% Check stats empty
+ Check1 = fun() ->
+ [] = ets:match(channel_queue_metrics, {Ch, QRes}),
+ [] = ets:match(channel_exchange_metrics, {Ch, X}),
+ [] = ets:match(channel_queue_exchange_metrics,
+ {Ch, {QRes, X}})
+ end,
+ test_ch_metrics(Check1, ?TIMEOUT),
+
+ %% Publish and get a message
+ rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = QName},
+ rabbit_basic:build_content(#'P_basic'{}, <<"">>)),
+ rabbit_channel:do(Ch, #'basic.get'{queue = QName}),
+
+ %% Check the stats reflect that
+ Check2 = fun() ->
+ [{{Ch, QRes}, 1, 0, 0, 0, 0, 0, 0, 0}] = ets:lookup(
+ channel_queue_metrics,
+ {Ch, QRes}),
+ [{{Ch, X}, 1, 0, 0, 0, 0}] = ets:lookup(
+ channel_exchange_metrics,
+ {Ch, X}),
+ [{{Ch, {QRes, X}}, 1, 0}] = ets:lookup(
+ channel_queue_exchange_metrics,
+ {Ch, {QRes, X}})
+ end,
+ test_ch_metrics(Check2, ?TIMEOUT),
+
+ %% Check the stats are marked for removal on queue deletion.
+ rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
+ Check3 = fun() ->
+ [{{Ch, QRes}, 1, 0, 0, 0, 0, 0, 0, 1}] = ets:lookup(
+ channel_queue_metrics,
+ {Ch, QRes}),
+ [{{Ch, X}, 1, 0, 0, 0, 0}] = ets:lookup(
+ channel_exchange_metrics,
+ {Ch, X}),
+ [{{Ch, {QRes, X}}, 1, 1}] = ets:lookup(
+ channel_queue_exchange_metrics,
+ {Ch, {QRes, X}})
+ end,
+ test_ch_metrics(Check3, ?TIMEOUT),
+
+ %% Check the garbage collection removes stuff.
+ force_metric_gc(),
+ Check4 = fun() ->
+ [] = ets:lookup(channel_queue_metrics, {Ch, QRes}),
+ [{{Ch, X}, 1, 0, 0, 0, 0}] = ets:lookup(
+ channel_exchange_metrics,
+ {Ch, X}),
+ [] = ets:lookup(channel_queue_exchange_metrics,
+ {Ch, {QRes, X}})
+ end,
+ test_ch_metrics(Check4, ?TIMEOUT),
+
+ rabbit_channel:shutdown(Ch),
+ dummy_event_receiver:stop(),
+ passed.
+
+force_metric_gc() ->
+ timer:sleep(300),
+ rabbit_core_metrics_gc ! start_gc,
+ gen_server:call(rabbit_core_metrics_gc, test).
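+
+%% force_metric_gc/0 relies on gen_server mailbox ordering: the synchronous
+%% `test' call can only be answered once the earlier `start_gc' message has
+%% been processed, so the GC run has completed by the time the call returns.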
+
+test_ch_metrics(Fun, Timeout) when Timeout =< 0 ->
+ Fun();
+test_ch_metrics(Fun, Timeout) ->
+ try
+ Fun()
+ catch
+ _:{badmatch, _} ->
+ timer:sleep(1000),
+ test_ch_metrics(Fun, Timeout - 1000)
+ end.
+
+head_message_timestamp_statistics(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, head_message_timestamp1, [Config]).
+
+head_message_timestamp1(_Config) ->
+ %% there is no convenient rabbit_channel API for confirms
+ %% this test could use, so it relies on tx.* methods
+ %% and gen_server2 flushing
+ application:set_env(rabbit, collect_statistics, fine),
+
+ %% Set up a channel and queue
+ {_Writer, Ch} = test_spawn(),
+ rabbit_channel:do(Ch, #'queue.declare'{}),
+ QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
+ after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+ end,
+ QRes = rabbit_misc:r(<<"/">>, queue, QName),
+
+ {ok, Q1} = rabbit_amqqueue:lookup(QRes),
+ QPid = amqqueue:get_pid(Q1),
+
+ %% Set up event receiver for queue
+ dummy_event_receiver:start(self(), [node()], [queue_stats]),
+
+ %% the head timestamp field is empty when the queue is empty
+ test_queue_statistics_receive_event(QPid,
+ fun (E) ->
+ (proplists:get_value(name, E) == QRes)
+ and
+ (proplists:get_value(head_message_timestamp, E) == '')
+ end),
+
+ rabbit_channel:do(Ch, #'tx.select'{}),
+ receive #'tx.select_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_tx_select_ok)
+ end,
+
+ %% Publish two messages and check that the timestamp is that of the first message
+ rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = QName},
+ rabbit_basic:build_content(#'P_basic'{timestamp = 1}, <<"">>)),
+ rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = QName},
+ rabbit_basic:build_content(#'P_basic'{timestamp = 2}, <<"">>)),
+ rabbit_channel:do(Ch, #'tx.commit'{}),
+ rabbit_channel:flush(Ch),
+ receive #'tx.commit_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_tx_commit_ok)
+ end,
+ test_queue_statistics_receive_event(QPid,
+ fun (E) ->
+ (proplists:get_value(name, E) == QRes)
+ and
+ (proplists:get_value(head_message_timestamp, E) == 1)
+ end),
+
+ %% Consume a message and check that the timestamp is now that of the second message
+ rabbit_channel:do(Ch, #'basic.get'{queue = QName, no_ack = true}),
+ test_queue_statistics_receive_event(QPid,
+ fun (E) ->
+ (proplists:get_value(name, E) == QRes)
+ and
+ (proplists:get_value(head_message_timestamp, E) == 2)
+ end),
+
+ %% Consume one more message and check again
+ rabbit_channel:do(Ch, #'basic.get'{queue = QName, no_ack = true}),
+ test_queue_statistics_receive_event(QPid,
+ fun (E) ->
+ (proplists:get_value(name, E) == QRes)
+ and
+ (proplists:get_value(head_message_timestamp, E) == '')
+ end),
+
+ %% Tear down
+ rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
+ rabbit_channel:shutdown(Ch),
+ dummy_event_receiver:stop(),
+
+ passed.
+
+test_queue_statistics_receive_event(Q, Matcher) ->
+ %% Q ! emit_stats,
+ test_queue_statistics_receive_event1(Q, Matcher).
+
+test_queue_statistics_receive_event1(Q, Matcher) ->
+ receive #event{type = queue_stats, props = Props} ->
+ case Matcher(Props) of
+ true -> Props;
+ _ -> test_queue_statistics_receive_event1(Q, Matcher)
+ end
+ after ?TIMEOUT -> throw(failed_to_receive_event)
+ end.
+
+test_spawn() ->
+ {Writer, _Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(),
+ ok = rabbit_channel:do(Ch, #'channel.open'{}),
+ receive #'channel.open_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
+ end,
+ {Writer, Ch}.
diff --git a/deps/rabbit/test/unit_supervisor2_SUITE.erl b/deps/rabbit/test/unit_supervisor2_SUITE.erl
new file mode 100644
index 0000000000..50633984e2
--- /dev/null
+++ b/deps/rabbit/test/unit_supervisor2_SUITE.erl
@@ -0,0 +1,69 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_supervisor2_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ check_shutdown_stop,
+ check_shutdown_ignored
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+check_shutdown_stop(_Config) ->
+ ok = check_shutdown(stop, 200, 200, 2000).
+
+check_shutdown_ignored(_Config) ->
+ ok = check_shutdown(ignored, 1, 2, 2000).
+
+check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) ->
+ {ok, Sup} = supervisor2:start_link(dummy_supervisor2, [SupTimeout]),
+ Res = lists:foldl(
+ fun (I, ok) ->
+ TestSupPid = erlang:whereis(dummy_supervisor2),
+ ChildPids =
+ [begin
+ {ok, ChildPid} =
+ supervisor2:start_child(TestSupPid, []),
+ ChildPid
+ end || _ <- lists:seq(1, ChildCount)],
+ MRef = erlang:monitor(process, TestSupPid),
+ [P ! SigStop || P <- ChildPids],
+ ok = supervisor2:terminate_child(Sup, test_sup),
+ {ok, _} = supervisor2:restart_child(Sup, test_sup),
+ receive
+ {'DOWN', MRef, process, TestSupPid, shutdown} ->
+ ok;
+ {'DOWN', MRef, process, TestSupPid, Reason} ->
+ {error, {I, Reason}}
+ end;
+ (_, R) ->
+ R
+ end, ok, lists:seq(1, Iterations)),
+ unlink(Sup),
+ MSupRef = erlang:monitor(process, Sup),
+ exit(Sup, shutdown),
+ receive
+ {'DOWN', MSupRef, process, Sup, _Reason} ->
+ ok
+ end,
+ Res.
diff --git a/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl b/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl
new file mode 100644
index 0000000000..193df1f956
--- /dev/null
+++ b/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl
@@ -0,0 +1,95 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_vm_memory_monitor_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ parse_line_linux,
+ set_vm_memory_high_watermark_command
+ ]}
+ ].
+
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+
+parse_line_linux(_Config) ->
+ lists:foreach(fun ({S, {K, V}}) ->
+ {K, V} = vm_memory_monitor:parse_line_linux(S)
+ end,
+ [{"MemTotal: 0 kB", {'MemTotal', 0}},
+ {"MemTotal: 502968 kB ", {'MemTotal', 515039232}},
+ {"MemFree: 178232 kB", {'MemFree', 182509568}},
+ {"MemTotal: 50296888", {'MemTotal', 50296888}},
+ {"MemTotal 502968 kB", {'MemTotal', 515039232}},
+ {"MemTotal 50296866 ", {'MemTotal', 50296866}}]),
+ ok.
+
+set_vm_memory_high_watermark_command(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, set_vm_memory_high_watermark_command1, [Config]).
+
+set_vm_memory_high_watermark_command1(_Config) ->
+ MemLimitRatio = 1.0,
+ MemTotal = vm_memory_monitor:get_total_memory(),
+
+ vm_memory_monitor:set_vm_memory_high_watermark(MemLimitRatio),
+ MemLimit = vm_memory_monitor:get_memory_limit(),
+ case MemLimit of
+ MemTotal -> ok;
+ _ -> MemTotalToMemLimitRatio = MemLimit * 100.0 / MemTotal / 100,
+ ct:fail(
+ "Expected memory high watermark to be ~p (~s), but it was ~p (~.1f)",
+ [MemTotal, MemLimitRatio, MemLimit, MemTotalToMemLimitRatio]
+ )
+ end.
diff --git a/deps/rabbit/test/upgrade_preparation_SUITE.erl b/deps/rabbit/test/upgrade_preparation_SUITE.erl
new file mode 100644
index 0000000000..880238515a
--- /dev/null
+++ b/deps/rabbit/test/upgrade_preparation_SUITE.erl
@@ -0,0 +1,109 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(upgrade_preparation_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, clustered}
+ ].
+
+groups() ->
+ [
+ {clustered, [], [
+ await_quorum_plus_one
+ ]}
+ ].
+
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3},
+ {rmq_nodename_suffix, Group}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+
+init_per_testcase(TestCase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, TestCase),
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok -> Config;
+ Skip -> Skip
+ end.
+
+end_per_testcase(TestCase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, TestCase).
+
+
+
+%%
+%% Test Cases
+%%
+
+-define(WAITING_INTERVAL, 10000).
+
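+%% Declares a quorum queue on a three-node cluster and checks that the
+%% "online quorum plus one" condition holds with all members up, fails
+%% while one node is stopped, and holds again after that node restarts.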
+await_quorum_plus_one(Config) ->
+ catch delete_queues(),
+ [A, B, _C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ declare(Ch, <<"qq.1">>, [{<<"x-queue-type">>, longstr, <<"quorum">>}]),
+ timer:sleep(100),
+ ?assert(await_quorum_plus_one(Config, 0)),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, B),
+ ?assertNot(await_quorum_plus_one(Config, 0)),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, B),
+ ?assert(await_quorum_plus_one(Config, 0)).
+
+%%
+%% Implementation
+%%
+
+declare(Ch, Q) ->
+ declare(Ch, Q, []).
+
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ auto_delete = false,
+ arguments = Args}).
+
+delete_queues() ->
+ [rabbit_amqqueue:delete(Q, false, false, <<"tests">>) || Q <- rabbit_amqqueue:list()].
+
+await_quorum_plus_one(Config, Node) ->
+ await_quorum_plus_one(Config, Node, ?WAITING_INTERVAL).
+
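+%% The RPC timeout is Timeout + 500 ms so the call itself outlives the
+%% remote await_online_quorum_plus_one/1 wait.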
+await_quorum_plus_one(Config, Node, Timeout) ->
+ rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_upgrade_preparation, await_online_quorum_plus_one, [Timeout], Timeout + 500).
+
diff --git a/deps/rabbit/test/vhost_SUITE.erl b/deps/rabbit/test/vhost_SUITE.erl
new file mode 100644
index 0000000000..4e6ffe0d74
--- /dev/null
+++ b/deps/rabbit/test/vhost_SUITE.erl
@@ -0,0 +1,381 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(vhost_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_1_network},
+ {group, cluster_size_2_network},
+ {group, cluster_size_1_direct},
+ {group, cluster_size_2_direct}
+ ].
+
+groups() ->
+ ClusterSize1Tests = [
+ single_node_vhost_deletion_forces_connection_closure,
+ vhost_failure_forces_connection_closure,
+ vhost_creation_idempotency
+ ],
+ ClusterSize2Tests = [
+ cluster_vhost_deletion_forces_connection_closure,
+ vhost_failure_forces_connection_closure,
+ vhost_failure_forces_connection_closure_on_failure_node,
+ node_starts_with_dead_vhosts,
+ node_starts_with_dead_vhosts_with_mirrors,
+ vhost_creation_idempotency
+ ],
+ [
+ {cluster_size_1_network, [], ClusterSize1Tests},
+ {cluster_size_2_network, [], ClusterSize2Tests},
+ {cluster_size_1_direct, [], ClusterSize1Tests},
+ {cluster_size_2_direct, [], ClusterSize2Tests}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% see partitions_SUITE
+-define(DELAY, 9000).
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_1_network, Config1, 1);
+init_per_group(cluster_size_2_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_2_network, Config1, 2);
+init_per_group(cluster_size_1_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_1_direct, Config1, 1);
+init_per_group(cluster_size_2_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_2_direct, Config1, 2).
+
+init_per_multinode_group(_Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ clear_all_connection_tracking_tables(Config),
+ Config.
+
+end_per_testcase(Testcase, Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+ case Testcase of
+ cluster_vhost_deletion_forces_connection_closure -> ok;
+ single_node_vhost_deletion_forces_connection_closure -> ok;
+ _ ->
+ delete_vhost(Config, VHost2)
+ end,
+ delete_vhost(Config, VHost1),
+ clear_all_connection_tracking_tables(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+delete_vhost(Config, VHost) ->
+ case rabbit_ct_broker_helpers:delete_vhost(Config, VHost) of
+ ok -> ok;
+ {error, {no_such_vhost, _}} -> ok
+ end.
+
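+%% Connection tracking tables are cleared on every node between test cases
+%% so that connection counts start from zero.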
+clear_all_connection_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_connection_tracking,
+ clear_tracked_connection_tables_for_this_node,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+single_node_vhost_deletion_forces_connection_closure(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [_Conn2] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)).
+
+vhost_failure_forces_connection_closure(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [_Conn2] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, VHost2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)).
+
+
+vhost_failure_forces_connection_closure_on_failure_node(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [_Conn20] = open_connections(Config, [{0, VHost2}]),
+ [_Conn21] = open_connections(Config, [{1, VHost2}]),
+ ?assertEqual(2, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, 0, VHost2),
+ timer:sleep(200),
+ %% Vhost2 connection on node 1 is still alive
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+ %% Vhost1 connection on node 0 is still alive
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)).
+
+
+cluster_vhost_deletion_forces_connection_closure(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [_Conn2] = open_connections(Config, [{1, VHost2}]),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)).
+
+node_starts_with_dead_vhosts(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1, VHost1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+
+ QName = <<"node_starts_with_dead_vhosts-q-1">>,
+ amqp_channel:call(Chan, #'queue.declare'{queue = QName, durable = true}),
+ rabbit_ct_client_helpers:publish(Chan, QName, 10),
+
+ DataStore1 = rabbit_ct_broker_helpers:rpc(
+ Config, 1, rabbit_vhost, msg_store_dir_path, [VHost1]),
+
+ rabbit_ct_broker_helpers:stop_node(Config, 1),
+
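+ %% Overwrite the vhost message store recovery file with garbage so that
+ %% vhost1 cannot recover when the node is restarted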
+ file:write_file(filename:join(DataStore1, "recovery.dets"), <<"garbage">>),
+
+ %% The node should still start even though vhost1 cannot recover
+ ok = rabbit_ct_broker_helpers:start_node(Config, 1),
+
+ timer:sleep(3000),
+
+ ?assertEqual(true, rabbit_ct_broker_helpers:rpc(Config, 1,
+ rabbit_vhost_sup_sup, is_vhost_alive, [VHost2])).
+
+node_starts_with_dead_vhosts_with_mirrors(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ true = rabbit_ct_broker_helpers:rpc(Config, 1,
+ rabbit_vhost_sup_sup, is_vhost_alive, [VHost1]),
+ true = rabbit_ct_broker_helpers:rpc(Config, 1,
+ rabbit_vhost_sup_sup, is_vhost_alive, [VHost2]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, 1,
+ rabbit_vhost_sup_sup, check, []),
+
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+
+ QName = <<"node_starts_with_dead_vhosts_with_mirrors-q-0">>,
+ amqp_channel:call(Chan, #'queue.declare'{queue = QName, durable = true}),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_policy, set,
+ [VHost1, <<"mirror">>, <<".*">>, [{<<"ha-mode">>, <<"all">>}],
+ 0, <<"queues">>, <<"acting-user">>]),
+
+ %% Wait for the queue to start a mirror
+ timer:sleep(500),
+
+ rabbit_ct_client_helpers:publish(Chan, QName, 10),
+
+ {ok, Q} = rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ rabbit_amqqueue, lookup,
+ [rabbit_misc:r(VHost1, queue, QName)], infinity),
+
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+
+ [Pid] = amqqueue:get_sync_slave_pids(Q),
+
+ Node1 = node(Pid),
+
+ DataStore1 = rabbit_ct_broker_helpers:rpc(
+ Config, 1, rabbit_vhost, msg_store_dir_path, [VHost1]),
+
+ rabbit_ct_broker_helpers:stop_node(Config, 1),
+
+ file:write_file(filename:join(DataStore1, "recovery.dets"), <<"garbage">>),
+
+ %% The node should still start even though vhost1 cannot recover
+ ok = rabbit_ct_broker_helpers:start_node(Config, 1),
+
+ timer:sleep(3000),
+
+ ?assertEqual(true, rabbit_ct_broker_helpers:rpc(Config, 1,
+ rabbit_vhost_sup_sup, is_vhost_alive, [VHost2])).
+
+vhost_creation_idempotency(Config) ->
+ VHost = <<"idempotency-test">>,
+ try
+ ?assertEqual(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost)),
+ ?assertEqual(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost)),
+ ?assertEqual(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost))
+ after
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost)
+ end.
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+open_connections(Config, NodesAndVHosts) ->
+ % Select the connection type configured for this group (network or direct)
+ OpenConnectionFun = case ?config(connection_type, Config) of
+ network -> open_unmanaged_connection;
+ direct -> open_unmanaged_connection_direct
+ end,
+ Conns = lists:map(fun
+ ({Node, VHost}) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node,
+ VHost);
+ (Node) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
+ end, NodesAndVHosts),
+ timer:sleep(500),
+ Conns.
+
+close_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ rabbit_ct_client_helpers:close_connection(Conn)
+ end, Conns),
+ timer:sleep(500).
+
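+%% Counts tracked connections in a vhost on the given node; the short sleep
+%% gives connection tracking time to register recent opens and closes.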
+count_connections_in(Config, VHost) ->
+ count_connections_in(Config, VHost, 0).
+count_connections_in(Config, VHost, NodeIndex) ->
+ timer:sleep(200),
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ count_tracked_items_in, [{vhost, VHost}]).
+
+set_up_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost),
+ set_vhost_connection_limit(Config, VHost, -1).
+
+set_vhost_connection_limit(Config, VHost, Count) ->
+ set_vhost_connection_limit(Config, 0, VHost, Count).
+
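+%% Sets the vhost connection limit through the set_vhost_limits control
+%% command, passing the limit as a JSON document.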
+set_vhost_connection_limit(Config, NodeIndex, VHost, Count) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_vhost_limits, Node,
+ ["{\"max-connections\": " ++ integer_to_list(Count) ++ "}"],
+ [{"-p", binary_to_list(VHost)}]).
+
+expect_that_client_connection_is_rejected(Config) ->
+ expect_that_client_connection_is_rejected(Config, 0).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex) ->
+ {error, _} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex, VHost) ->
+ {error, _} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex, VHost).